diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/Docker/Dockerfile b/spaces/123Kumar/vits-uma-genshin-honkai123/Docker/Dockerfile
deleted file mode 100644
index 4d39cdf02a2ec151686cc1d61234bf723068fed8..0000000000000000000000000000000000000000
--- a/spaces/123Kumar/vits-uma-genshin-honkai123/Docker/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM python:3.9-bullseye
-VOLUME ["/app"]
-WORKDIR /app
-# Set apt to Chinese mirror
-RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
-RUN apt-get update && apt-get -y install cmake git
-RUN git clone https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai
-WORKDIR /app/vits-uma-genshin-honkai
-RUN sed -i "s/\.launch()/\.launch(server_name=\"0.0.0.0\")/" /app/vits-uma-genshin-honkai/app.py
-ADD vits.sh /app/vits.sh
-EXPOSE 7860
-ENTRYPOINT [ "/app/vits.sh" ]
\ No newline at end of file
diff --git a/spaces/17TheWord/RealESRGAN/scripts/pytorch2onnx.py b/spaces/17TheWord/RealESRGAN/scripts/pytorch2onnx.py
deleted file mode 100644
index 09d99b2e0171265e70e7507ed8e882b616b449a1..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/scripts/pytorch2onnx.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import argparse
-import torch
-import torch.onnx
-from basicsr.archs.rrdbnet_arch import RRDBNet
-
-
-def main(args):
-    # An instance of the model
-    model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
-    if args.params:
-        keyname = 'params'
-    else:
-        keyname = 'params_ema'
-    model.load_state_dict(torch.load(args.input)[keyname])
-    # set the train mode to false since we will only run the forward pass.
-    model.train(False)
-    model.cpu().eval()
-
-    # An example input
-    x = torch.rand(1, 3, 64, 64)
-    # Export the model
-    with torch.no_grad():
-        torch_out = torch.onnx._export(model, x, args.output, opset_version=11, export_params=True)
-    print(torch_out.shape)
-
-
-if __name__ == '__main__':
-    """Convert pytorch model to onnx models"""
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        '--input', type=str, default='experiments/pretrained_models/RealESRGAN_x4plus.pth', help='Input model path')
-    parser.add_argument('--output', type=str, default='realesrgan-x4.onnx', help='Output onnx path')
-    parser.add_argument('--params', action='store_false', help='Use params instead of params_ema')
-    args = parser.parse_args()
-
-    main(args)
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Application Android Sur Geant Cx 88 Hd - HOT.md b/spaces/1gistliPinn/ChatGPT4/Examples/Application Android Sur Geant Cx 88 Hd - HOT.md
deleted file mode 100644
index dd25969903c5935dda58ad96bccd853cd508426a..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Application Android Sur Geant Cx 88 Hd - HOT.md
+++ /dev/null
@@ -1,6 +0,0 @@
-

Application Android Sur Geant Cx 88 Hd -


Download ··· https://imgfil.com/2uy0F2



-
-Applications Android Pour Geant 88 Hd New DOWNLOAD. ... update Geant GN- CX 8 Flash Samsat HD70 To HD80 GALAXY *W3 ON 16/06/ Upgrade By USB. 1fdad05405
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/CRACK Intuit QuickBooks Enterprise 18.0 R3 License Key FREE.md b/spaces/1gistliPinn/ChatGPT4/Examples/CRACK Intuit QuickBooks Enterprise 18.0 R3 License Key FREE.md
deleted file mode 100644
index 0cff63ec2f4289ea09cc0d4a3d78e9180559a15a..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/CRACK Intuit QuickBooks Enterprise 18.0 R3 License Key FREE.md
+++ /dev/null
@@ -1,6 +0,0 @@
-

CRACK Intuit QuickBooks Enterprise 18.0 R3 License Key


Download ✔✔✔ https://imgfil.com/2uxXTt



-
-Intuit QuickBooks Enterprise Accountant 2016 16.0 R3 Final - Small business ... idm 6.23QuickBooks Pro 2018 Serial Key With Crack Download. 1fdad05405
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Filmora9 [REPACK] Crack License Key Full [Latest].md b/spaces/1gistliPinn/ChatGPT4/Examples/Filmora9 [REPACK] Crack License Key Full [Latest].md
deleted file mode 100644
index f8cea04c2d76145893534b82900479520066a8db..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Filmora9 [REPACK] Crack License Key Full [Latest].md
+++ /dev/null
@@ -1,6 +0,0 @@
-

Filmora9 Crack License Key Full [Latest]


DOWNLOAD 🆓 https://imgfil.com/2uy0RU



- -... Full download of Wondershare Filmora 9, free activation of Wondershare Filmora 9, free serial key Wondershare Filmora 9 with licensed email registration code .. . ## What's new in Filmora 9 Final? 8a78ff9644
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descarga el APK de WhatsApp Messenger y accede a chats llamadas y videollamadas cifradas.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descarga el APK de WhatsApp Messenger y accede a chats llamadas y videollamadas cifradas.md
deleted file mode 100644
index c3ccf53461e7030b609052bbb297f2160fdedf26..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descarga el APK de WhatsApp Messenger y accede a chats llamadas y videollamadas cifradas.md
+++ /dev/null
@@ -1,139 +0,0 @@
-

Descargar WhatsApp Messenger APK: Cómo y por qué hacerlo

-

WhatsApp Messenger es una de las aplicaciones de mensajería más populares y utilizadas en todo el mundo. Con más de 2000 millones de usuarios activos, WhatsApp te permite enviar mensajes de texto, voz, imágenes, vídeos, documentos y otros tipos de archivos a tus contactos, así como realizar llamadas y videollamadas gratuitas a través de internet. Además, WhatsApp cuenta con un cifrado de extremo a extremo que protege la privacidad y seguridad de tus conversaciones.

-

descargar whatsapp messenger apk


Download ––– https://urlin.us/2uSXm7



-

Pero ¿qué pasa si quieres descargar el APK de WhatsApp Messenger? ¿Qué ventajas tiene hacerlo? ¿Qué riesgos implica? ¿Qué alternativas hay a WhatsApp? En este artículo te explicamos todo lo que necesitas saber sobre cómo y por qué descargar el APK de WhatsApp Messenger.

-

Qué es WhatsApp Messenger y qué ventajas tiene

-

WhatsApp Messenger es una aplicación de mensajería instantánea que funciona con la tecnología Voice over IP (VoIP), es decir, que utiliza la conexión a internet para enviar y recibir mensajes y llamadas. WhatsApp es propiedad de Facebook, Inc., que la adquirió en 2014 por 19 mil millones de dólares.

-

Una aplicación de mensajería gratuita y multiplataforma

-

Una de las principales ventajas de WhatsApp es que es una aplicación gratuita, que no requiere ningún tipo de suscripción ni pago para usarla. Solo necesitas tener un número de teléfono válido y una conexión a internet, ya sea por datos móviles o por Wi-Fi. Además, WhatsApp es compatible con diferentes plataformas móviles, como Android, iOS, Windows Phone, BlackBerry OS y otras. También puedes usar WhatsApp en tu ordenador, a través de un navegador web o de una aplicación de escritorio.

-

Una forma fácil y cómoda de hablar con tus amigos

-

Otra ventaja de WhatsApp es que te permite comunicarte con tus amigos y familiares de forma fácil y cómoda, sin importar dónde se encuentren. WhatsApp te muestra automáticamente todos los contactos que tienen la aplicación instalada en su teléfono, y puedes iniciar un chat individual o grupal con ellos. También puedes realizar llamadas o videollamadas individuales o grupales, con hasta ocho participantes en una misma conversación. Además, puedes compartir tu ubicación, tu estado, tus contactos, GIFs, stickers y emojis con tus contactos.

-

Una herramienta segura y con muchas funciones

-

Por último, otra ventaja de WhatsApp es que es una herramienta segura y con muchas funciones. WhatsApp utiliza un protocolo de cifrado de extremo a extremo, que significa que solo tú y el destinatario pueden ver el contenido de los mensajes y las llamadas que envías o recibes. Nadie más, ni siquiera WhatsApp o Facebook, puede acceder a ellos. Además, WhatsApp te ofrece la posibilidad de enviar mensajes de voz, crear encuestas, configurar mensajes que se autodestruyen, bloquear contactos indeseados, silenciar chats o grupos, personalizar tu perfil y mucho más.

-

descargar whatsapp messenger apk gratis
-descargar whatsapp messenger apk ultima version
-descargar whatsapp messenger apk para android
-descargar whatsapp messenger apk mod
-descargar whatsapp messenger apk sin play store
-descargar whatsapp messenger apk 2023
-descargar whatsapp messenger apk para pc
-descargar whatsapp messenger apk full
-descargar whatsapp messenger apk beta
-descargar whatsapp messenger apk uptodown
-descargar whatsapp messenger apk antiguo
-descargar whatsapp messenger apk para tablet
-descargar whatsapp messenger apk mega
-descargar whatsapp messenger apk sin conexion
-descargar whatsapp messenger apk con stickers
-descargar whatsapp messenger apk plus
-descargar whatsapp messenger apk para iphone
-descargar whatsapp messenger apk pro
-descargar whatsapp messenger apk lite
-descargar whatsapp messenger apk transparente
-descargar whatsapp messenger apk oficial
-descargar whatsapp messenger apk desde el sitio web
-descargar whatsapp messenger apk con videollamadas
-descargar whatsapp messenger apk sin anuncios
-descargar whatsapp messenger apk premium
-descargar whatsapp messenger apk 2.23.13.6
-descargar whatsapp messenger apk para smart tv
-descargar whatsapp messenger apk con temas
-descargar whatsapp messenger apk con emojis nuevos
-descargar whatsapp messenger apk con estados
-descargar whatsapp messenger apk sin verificacion de numero
-descargar whatsapp messenger apk con respaldo de chats
-descargar whatsapp messenger apk con modo oscuro
-descargar whatsapp messenger apk con privacidad mejorada
-descargar whatsapp messenger apk con mensajes temporales
-descargar whatsapp messenger apk con cifrado de extremo a extremo
-descargar whatsapp messenger apk con grupos de hasta 256 personas
-descargar whatsapp messenger apk con llamadas y mensajes gratis
-descargar whatsapp messenger apk con envio de archivos de hasta 100 MB
-descargar whatsapp messenger apk con compatibilidad con otras apps de mensajeria
-descargar whatsapp messenger apk con notificaciones personalizadas
-descargar whatsapp messenger apk con bloqueo de contactos indeseados
-descargar whatsapp messenger apk con eliminacion de mensajes enviados por error
-descargar whatsapp messenger apk con silenciamiento de chats y grupos
-descargar whatsapp messenger apk con marcacion de mensajes como no leidos
-descargar whatsapp messenger apk con fijacion de chats favoritos
-descargar whatsapp messenger apk con uso compartido de ubicacion en tiempo real
-descargar whatsapp messenger apk con creacion de stickers personalizados
-descargar whatsapp messenger apk con envio de GIFs y memes
-descargar whatsapp messenger apk con acceso a la web y al escritorio

-

Cómo descargar el APK de WhatsApp Messenger

-

Si quieres descargar el APK de WhatsApp Messenger, debes saber qué es un APK y cómo instalarlo en tu dispositivo. Un APK es un archivo que contiene el paquete de instalación de una aplicación para Android. Normalmente, cuando descargas una aplicación desde la tienda oficial de Google Play, esta se instala automáticamente en tu teléfono. Sin embargo, hay ocasiones en las que puedes querer descargar el APK de una aplicación desde otra fuente, como por ejemplo, una página web o un servicio de almacenamiento en la nube.

-

Los requisitos para instalar el APK

-

Para poder instalar el APK de WhatsApp Messenger, necesitas cumplir con algunos requisitos previos. Estos son:

- -

Los pasos para descargar e instalar el APK

-

Una vez que hayas cumplido con los requisitos anteriores, puedes seguir estos pasos para descargar e instalar el APK de WhatsApp Messenger:

-
    -
  1. Accede a la página web oficial de WhatsApp y haz clic en el botón Descargar ahora.
  2. -
  3. Espera a que se descargue el archivo APK en tu teléfono. Puedes ver el progreso de la descarga en la barra de notificaciones.
  4. -
  5. Cuando se haya completado la descarga, abre el archivo APK desde el gestor de archivos o desde la notificación.
  6. -
  7. Acepta los permisos que te solicita la aplicación y sigue las instrucciones que aparecen en la pantalla para completar la instalación.
  8. -
  9. Abre la aplicación de WhatsApp y verifica tu número de teléfono siguiendo los pasos que te indica la aplicación.
  10. -
  11. Disfruta de WhatsApp Messenger en tu teléfono Android.
  12. -
-

Los riesgos y precauciones al usar el APK

-

Aunque descargar e instalar el APK de WhatsApp Messenger puede tener algunas ventajas, como por ejemplo, acceder a las últimas actualizaciones antes que nadie o evitar las restricciones geográficas o de compatibilidad, también implica algunos riesgos y precauciones que debes tener en cuenta. Estos son:

- -

Por lo tanto, te recomendamos que solo descargues e instales el APK de WhatsApp Messenger si sabes lo que estás haciendo y si confías en la fuente desde donde lo obtienes. Además, te aconsejamos que mantengas tu teléfono protegido con un antivirus y que hagas copias de seguridad periódicas de tus chats y archivos.

-

Qué alternativas hay a WhatsApp Messenger

-

Si no quieres descargar el APK de WhatsApp Messenger o si quieres probar otras opciones de mensajería instantánea, existen algunas alternativas a WhatsApp que puedes considerar. Estas son algunas de las más populares y destacadas:

-

Signal: una opción más segura y privada

-

Signal es una aplicación de mensajería instantánea que se caracteriza por su alto nivel de seguridad y privacidad. Signal utiliza un cifrado de extremo a extremo más avanzado que el de WhatsApp, y no recopila ni almacena ningún dato personal ni metadato de sus usuarios. Además, Signal ofrece funciones como mensajes que se autodestruyen, bloqueo con huella dactilar o código PIN, verificación de seguridad y protección contra capturas de pantalla. Signal es una aplicación gratuita y de código abierto, que puedes descargar desde Google Play o desde su página web oficial.

-

Telegram: una opción más versátil y divertida

-

Telegram es una aplicación de mensajería instantánea que se distingue por su versatilidad y diversión. Telegram te permite enviar mensajes de texto, voz, imágenes, vídeos, documentos y otros tipos de archivos a tus contactos, así como realizar llamadas y videollamadas gratuitas a través de internet. Además, Telegram cuenta con funciones como chats secretos, bots, canales, grupos con hasta 200 mil miembros, stickers, GIFs, juegos y mucho más. Telegram es una aplicación gratuita y de código abierto, que puedes descargar desde Google Play o desde su página web oficial.

-

iMessage: una opción solo para usuarios de Apple

-

iMessage es una aplicación de mensajería instantánea que solo está disponible para los usuarios de dispositivos Apple, como iPhone, iPad o Mac. iMessage te permite enviar mensajes de texto, voz, imágenes, vídeos, documentos y otros tipos de archivos a tus contactos que también tengan un dispositivo Apple, así como realizar llamadas y videollamadas gratuitas a través de internet. Además, iMessage cuenta con funciones como efectos de texto e imagen, animojis, memojis, stickers, Apple Pay y mucho más. iMessage es una aplicación gratuita y que viene integrada en el sistema operativo iOS o macOS.

-

Conclusión

-

En este artículo te hemos explicado cómo y por qué descargar el APK de WhatsApp Messenger, una de las aplicaciones de mensajería más populares y utilizadas en todo el mundo. WhatsApp te ofrece la posibilidad de enviar mensajes y realizar llamadas gratuitas a tus contactos, con un alto nivel de seguridad y privacidad. Sin embargo, descargar el APK de WhatsApp también implica algunos riesgos y precauciones que debes tener en cuenta, como la exposición a virus o malware, la pérdida de funciones o la violación de los términos y condiciones de uso. Por eso, te recomendamos que solo descargues e instales el APK de WhatsApp si sabes lo que estás haciendo y si confías en la fuente desde donde lo obtienes. Además, te hemos presentado algunas alternativas a WhatsApp que puedes considerar, como Signal, Telegram o iMessage, que también te ofrecen servicios similares o mejores que WhatsApp.

-

Esperamos que este artículo te haya sido útil e interesante. Si tienes alguna duda o comentario sobre el tema, no dudes en dejarnos un mensaje. Y si te ha gustado el artículo, compártelo con tus amigos en las redes sociales. ¡Gracias por leernos!

-

Preguntas frecuentes

-

A continuación te respondemos algunas preguntas frecuentes sobre el tema de descargar el APK de WhatsApp Messenger:

-

¿Qué significa APK?

-

APK significa Android Package Kit o Android Application Package. Es un formato de archivo que contiene el paquete de instalación de una aplicación para Android.

-

¿Qué ventajas tiene descargar el APK de WhatsApp?

-

Descargar el APK de WhatsApp puede tener algunas ventajas, como por ejemplo:

- -

¿Qué riesgos tiene descargar el APK de WhatsApp?

-

Descargar el APK de WhatsApp puede tener algunos riesgos, como por ejemplo:

- -

¿Qué alternativas hay a WhatsApp?

-

Algunas alternativas a WhatsApp son:

- -

¿Cómo puedo descargar el APK de WhatsApp?

-

Puedes descargar el APK de WhatsApp siguiendo estos pasos:

-
    -
  1. Accede a la página web oficial de WhatsApp y haz clic en el botón Descargar ahora.
  2. -
  3. Espera a que se descargue el archivo APK en tu teléfono. Puedes ver el progreso de la descarga en la barra de notificaciones.
  4. -
  5. Cuando se haya completado la descarga, abre el archivo APK desde el gestor de archivos o desde la notificación.
  6. -
  7. Acepta los permisos que te solicita la aplicación y sigue las instrucciones que aparecen en la pantalla para completar la instalación.
  8. -
  9. Abre la aplicación de WhatsApp y verifica tu número de teléfono siguiendo los pasos que te indica la aplicación.
  10. -
-

Si tienes algún problema o duda al descargar o instalar el APK de WhatsApp, puedes consultar la sección de ayuda de la página web oficial de WhatsApp o contactar con su servicio de atención al cliente.

197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Become a Football Legend with Vive le Football for Android.md b/spaces/1phancelerku/anime-remove-background/Become a Football Legend with Vive le Football for Android.md
deleted file mode 100644
index d52af5165546f4a214823cb254d4e3723ba5478b..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Become a Football Legend with Vive le Football for Android.md
+++ /dev/null
@@ -1,110 +0,0 @@
-

Free Download Vive le Football: A Guide to the Ultimate Football Game

-

If you are a fan of football (or soccer, as some may call it), you must have heard of Vive le Football, the latest sports game from NetEase Games. This game promises to deliver the most realistic and immersive football experience on your devices, with stunning graphics, licensed teams and players, tactical gameplay, and online and offline modes. In this article, we will show you how to download Vive le Football for free on your PC, Mac, Android, or iOS device, and what are the features and benefits of playing this game.

-

free download vive le football


DOWNLOADhttps://jinyurl.com/2uNPL7



-

What is Vive le Football?

-

Vive le Football is a football simulation game developed by NetEase Games, one of the leading game developers in China. The game was released in September 2022, and has received positive reviews from players and critics alike. The game aims to provide a realistic and authentic football experience, with licenses from FIFPro and the Chinese national team, as well as other famous clubs and leagues from around the world. The game also features advanced physics and graphics engine, smart AI, and various game modes to suit different preferences and skill levels.

-

Features of Vive le Football

-

Here are some of the main features of Vive le Football that make it stand out from other football games:

-

Realistic graphics and physics

-

Vive le Football uses Unreal Engine 4 to create stunning visuals and animations for the game. The game also uses a realistic physics system that simulates the movement and collision of the ball, players, and environment. You can see the sweat, dirt, grass, and weather effects on the players and the pitch, as well as the facial expressions and emotions of the players. The game also supports 4K resolution and 60 FPS for a smooth and immersive gameplay.

-

How to free download vive le football for android
-Free download vive le football apk latest version
-Vive le football free mobile football management game
-Free download vive le football mod apk unlimited money
-Best tips and tricks for free download vive le football
-Free download vive le football offline mode
-Vive le football review: a free and realistic football game
-Free download vive le football for PC windows 10
-Vive le football cheats: how to get free coins and gems
-Free download vive le football hack tool no survey
-Vive le football gameplay: how to play and win matches
-Free download vive le football for iOS devices
-Vive le football features: what makes it different from other games
-Free download vive le football update: what's new and improved
-Vive le football guide: how to build your dream team
-Free download vive le football online multiplayer mode
-Vive le football ratings: how does it compare to other games
-Free download vive le football for mac os x
-Vive le football support: how to contact the developers and get help
-Free download vive le football beta version: how to join and test the game
-Vive le football community: how to connect with other players and fans
-Free download vive le football for android tv
-Vive le football news: what's the latest information and announcements
-Free download vive le football for firestick
-Vive le football forum: where to discuss and share your opinions
-Free download vive le football for chromebook
-Vive le football wiki: where to find all the information and resources
-Free download vive le football for linux
-Vive le football blog: where to read and write about the game
-Free download vive le football for roku
-Vive le football podcast: where to listen and learn about the game
-Free download vive le football for smart tv
-Vive le football video: where to watch and enjoy the game
-Free download vive le football for xbox one
-Vive le football social media: where to follow and interact with the game
-Free download vive le football for ps4
-Vive le football merchandise: where to buy and show your support
-Free download vive le football for nintendo switch
-Vive le football events: where to join and participate in the game activities
-Free download vive le football for windows phone

-

Licensed teams and players

-

Vive le Football has official licenses from FIFPro and the Chinese national team, as well as other popular clubs and leagues from Europe, Asia, America, and Africa. You can play with or against some of the best players in the world, such as Cristiano Ronaldo, Lionel Messi, Neymar Jr., Kylian Mbappé, Mohamed Salah, Harry Kane, Robert Lewandowski, Kevin De Bruyne, Sergio Ramos, Virgil van Dijk, Manuel Neuer, Alisson Becker, and many more. You can also customize your own team with your favorite players, kits, badges, stadiums, and sponsors.

-

Tactical gameplay and AI

-

Vive le Football is not just about scoring goals. It is also about creating strategies and tactics to outsmart your opponents. You can choose from different formations, styles, roles, and instructions for your team, as well as adjust them during the match. You can also use various skills and tricks to dribble past defenders, pass accurately, shoot powerfully, tackle cleanly, save brilliantly, and more. The game also features a smart AI that adapts to your actions and decisions, making each match challenging and unpredictable.

-

Online and offline modes

-

Vive le Football offers various game modes for you to enjoy. You can play online with or against other players from around the world in real-time matches or tournaments. You can also join or create your own club with your friends or other players, and compete for glory and rewards in club leagues or cups. You can also play offline in single-player mode or local multiplayer mode with your friends or family on the same device. You can also play career mode or manager mode to experience the life of a professional football player or a football manager. You can also play training mode or challenge mode to improve your skills and test your limits.

-

How to download Vive le Football for free?

-

Vive le Football is a free-to-play game that you can download and play on your PC, Mac, Android, or iOS device. Here are the steps to download Vive le Football for free on different platforms:

-

Download Vive le Football on PC & Mac with BlueStacks

-

If you want to play Vive le Football on your PC or Mac, you can use BlueStacks, a popular Android emulator that allows you to run Android apps and games on your computer. BlueStacks is free, safe, and easy to use, and it offers many features and benefits for gamers.

-

Steps to install BlueStacks and Vive le Football

-

Here are the steps to install BlueStacks and Vive le Football on your PC or Mac:

-
    -
  1. Go to the official website of BlueStacks and download the latest version of the emulator for your PC or Mac.
  2. -
  3. Run the installer and follow the instructions to complete the installation process.
  4. -
  5. Launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free.
  6. -
  7. Go to the Google Play Store app on BlueStacks and search for Vive le Football. Alternatively, you can use this link to go directly to the game page.
  8. -
  9. Click on the Install button and wait for the game to download and install on BlueStacks.
  10. -
  11. Once the installation is done, you can find the game icon on the home screen of BlueStacks. Click on it to launch the game and enjoy playing Vive le Football on your PC or Mac.
  12. -
-

Benefits of playing Vive le Football on PC & Mac

-

Here are some of the benefits of playing Vive le Football on PC & Mac with BlueStacks:

- -

Download Vive le Football on Android and iOS devices

-

If you want to play Vive le Football on your Android or iOS device, you can download it from the Google Play Store or the App Store respectively. The game is compatible with most of the modern smartphones and tablets that meet the minimum requirements.

-

Steps to download Vive le Football from Google Play Store or App Store

-

Here are the steps to download Vive le Football from Google Play Store or App Store on your Android or iOS device:

-
    -
  1. Open the Google Play Store app or the App Store app on your device.
  2. -
  3. Search for Vive le Football in the search bar. Alternatively, you can use these links to go directly to the game page.
  4. -
  5. Tap on the Install button (for Android) or the Get button (for iOS) and wait for the game to download and install on your device.
  6. -
  7. Once the installation is done, you can find the game icon on your device's home screen or app drawer. Tap on it to launch the game and enjoy playing Vive le Football on your device.
  8. -
-

Requirements and compatibility of Vive le Football on mobile devices

-

Here are the minimum requirements and compatibility of Vive le Football on mobile devices:

- | Platform | OS Version | RAM | Storage | | Android | 5.0 or higher | 2 GB or higher | 1 GB or higher | | iOS | 10.0 or higher | 2 GB or higher | 1 GB or higher |

Note: These are the minimum requirements for running the game smoothly. The actual performance may vary depending on your device's specifications and settings.

-

Conclusion

-

Vive le Football is a football simulation game that offers a realistic and immersive football experience on your devices. You can play with licensed teams and players, create your own strategies and tactics, use various skills and tricks, and compete in online and offline modes. You can download Vive le Football for free on your PC, Mac, Android, or iOS device by following the steps mentioned above. If you are a football lover , you should definitely give Vive le Football a try and see for yourself why it is one of the best football games in the market.

-

FAQs

-

Here are some of the frequently asked questions about Vive le Football:

-
    -
  1. Is Vive le Football free to play?
  2. -

    Yes, Vive le Football is free to play. However, the game may contain some optional in-app purchases that can enhance your gameplay or unlock some premium features.

    -
  3. Is Vive le Football online or offline?
  4. -

    Vive le Football supports both online and offline modes. You can play online with or against other players from around the world, or play offline in single-player mode or local multiplayer mode with your friends or family on the same device.

    -
  5. How can I update Vive le Football?
  6. -

    Vive le Football is regularly updated with new features, content, and improvements. You can update the game by going to the Google Play Store app or the App Store app on your device and checking for any available updates. Alternatively, you can also enable automatic updates for the game in your device's settings.

    -
  7. How can I contact the support team of Vive le Football?
  8. -

    If you have any issues, questions, or feedback regarding Vive le Football, you can contact the support team of NetEase Games by going to the game's settings and tapping on the Customer Service button. You can also visit the official website or the social media pages of NetEase Games for more information and updates.

    -
  9. How can I play Vive le Football on PC & Mac?
  10. -

    If you want to play Vive le Football on PC & Mac, you can use BlueStacks, a popular Android emulator that allows you to run Android apps and games on your computer. You can download BlueStacks from its official website and follow the steps mentioned above to install and play Vive le Football on your PC & Mac.

    -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/2ndelement/voicevox/voicevox_engine/engine_manifest/EngineManifestLoader.py b/spaces/2ndelement/voicevox/voicevox_engine/engine_manifest/EngineManifestLoader.py deleted file mode 100644 index bec6a2a7b6ec7f7316eace37db102070ba437a21..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/voicevox_engine/engine_manifest/EngineManifestLoader.py +++ /dev/null @@ -1,46 +0,0 @@ -import json -from base64 import b64encode -from pathlib import Path - -from .EngineManifest import EngineManifest, LicenseInfo, UpdateInfo - - -class EngineManifestLoader: - def __init__(self, manifest_path: Path, root_dir: Path): - self.manifest_path = manifest_path - self.root_dir = root_dir - - def load_manifest(self) -> EngineManifest: - manifest = json.loads(self.manifest_path.read_text(encoding="utf-8")) - - manifest = EngineManifest( - manifest_version=manifest["manifest_version"], - name=manifest["name"], - brand_name=manifest["brand_name"], - uuid=manifest["uuid"], - url=manifest["url"], - default_sampling_rate=manifest["default_sampling_rate"], - icon=b64encode((self.root_dir / manifest["icon"]).read_bytes()).decode( - "utf-8" - ), - terms_of_service=(self.root_dir / manifest["terms_of_service"]).read_text( - "utf-8" - ), - update_infos=[ - UpdateInfo(**update_info) - for update_info in json.loads( - (self.root_dir / manifest["update_infos"]).read_text("utf-8") - ) - ], - dependency_licenses=[ - LicenseInfo(**license_info) - for license_info in json.loads( - (self.root_dir / manifest["dependency_licenses"]).read_text("utf-8") - ) - ], - supported_features={ - key: item["value"] - for key, item in manifest["supported_features"].items() - }, - ) - return manifest diff --git a/spaces/4Taps/SadTalker/modules/sadtalker_test.py b/spaces/4Taps/SadTalker/modules/sadtalker_test.py deleted file mode 100644 index 34d9699f71fcd6d8f413f9cc96926dd6ceff36b1..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/modules/sadtalker_test.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -import os, sys, shutil -from src.utils.preprocess import CropAndExtract -from src.test_audio2coeff import Audio2Coeff -from src.facerender.animate import AnimateFromCoeff -from src.generate_batch import get_data -from src.generate_facerender_batch import get_facerender_data -import uuid - -from pydub import AudioSegment - -def mp3_to_wav(mp3_filename,wav_filename,frame_rate): - mp3_file = AudioSegment.from_file(file=mp3_filename) - mp3_file.set_frame_rate(frame_rate).export(wav_filename,format="wav") - -from modules.text2speech import text2speech - -class SadTalker(): - - def __init__(self, checkpoint_path='checkpoints'): - - if torch.cuda.is_available() : - device = "cuda" - else: - device = "cpu" - - # current_code_path = sys.argv[0] - # modules_path = os.path.split(current_code_path)[0] - - current_root_path = './' - - os.environ['TORCH_HOME']=os.path.join(current_root_path, 'checkpoints') - - path_of_lm_croper = os.path.join(current_root_path, 'checkpoints', 'shape_predictor_68_face_landmarks.dat') - path_of_net_recon_model = os.path.join(current_root_path, 'checkpoints', 'epoch_20.pth') - dir_of_BFM_fitting = os.path.join(current_root_path, 'checkpoints', 'BFM_Fitting') - wav2lip_checkpoint = os.path.join(current_root_path, 'checkpoints', 'wav2lip.pth') - - audio2pose_checkpoint = os.path.join(current_root_path, 'checkpoints', 'auido2pose_00140-model.pth') - audio2pose_yaml_path = os.path.join(current_root_path, 'config', 'auido2pose.yaml') - - 
audio2exp_checkpoint = os.path.join(current_root_path, 'checkpoints', 'auido2exp_00300-model.pth') - audio2exp_yaml_path = os.path.join(current_root_path, 'config', 'auido2exp.yaml') - - free_view_checkpoint = os.path.join(current_root_path, 'checkpoints', 'facevid2vid_00189-model.pth.tar') - mapping_checkpoint = os.path.join(current_root_path, 'checkpoints', 'mapping_00229-model.pth.tar') - facerender_yaml_path = os.path.join(current_root_path, 'config', 'facerender.yaml') - - #init model - print(path_of_lm_croper) - self.preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device) - - print(audio2pose_checkpoint) - self.audio_to_coeff = Audio2Coeff(audio2pose_checkpoint, audio2pose_yaml_path, - audio2exp_checkpoint, audio2exp_yaml_path, wav2lip_checkpoint, device) - print(free_view_checkpoint) - self.animate_from_coeff = AnimateFromCoeff(free_view_checkpoint, mapping_checkpoint, - facerender_yaml_path, device) - self.device = device - - def test(self, source_image, driven_audio, still_mode, resize_mode, use_enhancer, result_dir='./'): - - time_tag = str(uuid.uuid4()) # strftime("%Y_%m_%d_%H.%M.%S") - save_dir = os.path.join(result_dir, time_tag) - os.makedirs(save_dir, exist_ok=True) - - input_dir = os.path.join(save_dir, 'input') - os.makedirs(input_dir, exist_ok=True) - - print(source_image) - pic_path = os.path.join(input_dir, os.path.basename(source_image)) - shutil.move(source_image, input_dir) - - if os.path.isfile(driven_audio): - audio_path = os.path.join(input_dir, os.path.basename(driven_audio)) - - #### mp3 to wav - if '.mp3' in audio_path: - mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000) - audio_path = audio_path.replace('.mp3', '.wav') - else: - shutil.move(driven_audio, input_dir) - else: - text2speech - - - os.makedirs(save_dir, exist_ok=True) - pose_style = 0 - #crop image and extract 3dmm from image - first_frame_dir = os.path.join(save_dir, 'first_frame_dir') - os.makedirs(first_frame_dir, exist_ok=True) - first_coeff_path, crop_pic_path, original_size = self.preprocess_model.generate(pic_path, first_frame_dir, crop_or_resize= 'resize' if resize_mode else 'crop') - if first_coeff_path is None: - raise AttributeError("No face is detected") - - #audio2ceoff - batch = get_data(first_coeff_path, audio_path, self.device) - coeff_path = self.audio_to_coeff.generate(batch, save_dir, pose_style) - #coeff2video - batch_size = 4 - data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode) - self.animate_from_coeff.generate(data, save_dir, enhancer='gfpgan' if use_enhancer else None, original_size=original_size) - video_name = data['video_name'] - print(f'The generated video is named {video_name} in {save_dir}') - - torch.cuda.empty_cache() - torch.cuda.synchronize() - - import gc; gc.collect() - - if use_enhancer: - return os.path.join(save_dir, video_name+'_enhanced.mp4'), os.path.join(save_dir, video_name+'_enhanced.mp4') - - else: - return os.path.join(save_dir, video_name+'.mp4'), os.path.join(save_dir, video_name+'.mp4') - - - \ No newline at end of file diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/speed.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/speed.py deleted file mode 100644 index 45e95237da65e44f35a172c25ac6dc4e313e4eae..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/speed.py +++ /dev/null @@ -1,23 +0,0 @@ -from easydict import 
EasyDict as edict - -# configs for test speed - -config = edict() -config.loss = "arcface" -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "synthetic" -config.num_classes = 100 * 10000 -config.num_epoch = 30 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = [] diff --git a/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537238KB.py b/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537238KB.py deleted file mode 100644 index a1bb530e006482704f234c2e739a695174142941..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537238KB.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -import numpy as np -from torch import nn -import torch.nn.functional as F - -from . import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : 
aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/AB-TW/team-ai/documents/bussiness_context/business_context.md b/spaces/AB-TW/team-ai/documents/bussiness_context/business_context.md deleted file mode 100644 index 8580fb1d8d8aa0c5bba8505d01dad8d234664d40..0000000000000000000000000000000000000000 --- a/spaces/AB-TW/team-ai/documents/bussiness_context/business_context.md +++ /dev/null @@ -1,19 +0,0 @@ -AB测试系统中的配置管理是该系统的一个重要功能,其中主要涉及两个业务概念:FeatureFlag 和 FeatureConfig。 - -FeatureFlag 用于标识某个具体 Feature,其主要属性包括 featureKey(Feature 标识)、名称、描述、enabled、创建时间、最后更新时间和 template。其中,template 作为 FeatureConfig 的模板,用于生成后续 FeatureConfig 的配置界面组件,其属性包括 key、名称、描述、dataType 和 items。其中,dataType 为枚举值,取值范围为 STRING、NUMBER、BOOLEAN、OBJECT 和 ARRAY。 - -FeatureConfig 用于配置某个 Feature 中控制前端展示效果的配置项,其主要属性包括 featureKey(Feature 标识)、data(配置数据)、saData(埋点数据)、status、标题、描述、创建时间和更新时间。其中,status 为枚举值,取值范围为 DRAFT、PUBLISHED 和 DISABLED。新增的 FeatureConfig 状态为 DRAFT,执行发布操作后变为 PUBLISHED,执行撤销操作后变为 DISABLED。一个 FeatureFlag 中可以包含多个 FeatureConfig,通过 featureKey 字段进行关联。 - -添加 FeatureConfig 的主要目的是为了控制 FeatureConfig 消费方的某个行为。在添加 FeatureConfig 时,应该包含 featureKey、data、saData、status、标题和描述信息。新增的 FeatureConfig 状态为 DRAFT。 - -客户端用户需要查看 FeatureConfig 中的 data、saData、更新时间和 id。同时,FeatureConfig 可以关联圈人条件,符合圈人条件的配置可以展示给客户端用户。客户端用户仅能查看符合圈人条件的 PUBLISHED 状态的数据。圈人条件包括上传用户白名单、按照比例灰度发布、地理位置和人群标签等。 - - 在添加 FeatureConfig 时,应该包含 featureKey、data、saData、status、标题和描述信息。新增的 FeatureConfig 状态为 DRAFT。 - - 客户端用户通过客户端访问服务端接口获取FeatureConfig,客户端通过FeatureConfig控制相关Feature展示 - - 用户白名单圈人条件需要上传用户id的白名单,仅在白名单里的用户可以获取到相关feature -地理位置配置端需要设置圈定地区的地理位置编号列表,客户端请求接口是传递地理位置编号参数,位置编号匹配的数据用户可见 - - - 新增实验需要提供实验名称、目标、分组信息(包括分组标识、描述、比例) \ No newline at end of file diff --git a/spaces/AEUPH/SENTIENCE_PROGRAMMING_LANGUAGE/README.md b/spaces/AEUPH/SENTIENCE_PROGRAMMING_LANGUAGE/README.md deleted file mode 100644 index dbfe690e350e37b5f6f05f27517cc9ad01314682..0000000000000000000000000000000000000000 --- a/spaces/AEUPH/SENTIENCE_PROGRAMMING_LANGUAGE/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: SENTIENCE PROGRAMMING LANGUAGE -emoji: 💻 -colorFrom: purple -colorTo: green -sdk: static -pinned: false -license: cc ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AI-Hobbyist/Hoyo-RVC/i18n/locale_diff.py b/spaces/AI-Hobbyist/Hoyo-RVC/i18n/locale_diff.py deleted file mode 100644 index 257277965e0866a86d0361863a8f1b408c4f71ab..0000000000000000000000000000000000000000 --- a/spaces/AI-Hobbyist/Hoyo-RVC/i18n/locale_diff.py +++ /dev/null @@ -1,45 +0,0 @@ -import json -import os -from collections import OrderedDict - -# Define the standard file name -standard_file = "zh_CN.json" - -# Find all JSON files in the directory -dir_path = "./" -languages = [ - f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file -] - -# Load the standard file -with open(standard_file, "r", encoding="utf-8") as f: - standard_data = json.load(f, object_pairs_hook=OrderedDict) - -# Loop through each language file -for lang_file in languages: - # Load the language file - with open(lang_file, "r", 
encoding="utf-8") as f: - lang_data = json.load(f, object_pairs_hook=OrderedDict) - - # Find the difference between the language file and the standard file - diff = set(standard_data.keys()) - set(lang_data.keys()) - - miss = set(lang_data.keys()) - set(standard_data.keys()) - - # Add any missing keys to the language file - for key in diff: - lang_data[key] = key - - # Del any extra keys to the language file - for key in miss: - del lang_data[key] - - # Sort the keys of the language file to match the order of the standard file - lang_data = OrderedDict( - sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0])) - ) - - # Save the updated language file - with open(lang_file, "w", encoding="utf-8") as f: - json.dump(lang_data, f, ensure_ascii=False, indent=4) - f.write("\n") diff --git a/spaces/AIConsultant/MusicGen/docs/CONDITIONING.md b/spaces/AIConsultant/MusicGen/docs/CONDITIONING.md deleted file mode 100644 index 6e356cb8e9912d3e18fc84598c1acf77c6e7abc5..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/docs/CONDITIONING.md +++ /dev/null @@ -1,146 +0,0 @@ -# AudioCraft conditioning modules - -AudioCraft provides a -[modular implementation of conditioning modules](../audiocraft/modules/conditioners.py) -that can be used with the language model to condition the generation. -The codebase was developed in order to easily extend the set of modules -currently supported to easily develop new ways of controlling the generation. - - -## Conditioning methods - -For now, we support 3 main types of conditioning within AudioCraft: -* Text-based conditioning methods -* Waveform-based conditioning methods -* Joint embedding conditioning methods for text and audio projected in a shared latent space. - -The Language Model relies on 2 core components that handle processing information: -* The `ConditionProvider` class, that maps metadata to processed conditions leveraging -all the defined conditioners for the given task. -* The `ConditionFuser` class, that takes preprocessed conditions and properly fuse the -conditioning embedding to the language model inputs following a given fusing strategy. - -Different conditioners (for text, waveform, joint embeddings...) are provided as torch -modules in AudioCraft and are used internally in the language model to process the -conditioning signals and feed them to the language model. - - -## Core concepts - -### Conditioners - -The `BaseConditioner` torch module is the base implementation for all conditioners in audiocraft. - -Each conditioner is expected to implement 2 methods: -* The `tokenize` method that is used as a preprocessing method that contains all processing -that can lead to synchronization points (e.g. BPE tokenization with transfer to the GPU). -The output of the tokenize method will then be used to feed the forward method. -* The `forward` method that takes the output of the tokenize method and contains the core computation -to obtain the conditioning embedding along with a mask indicating valid indices (e.g. padding tokens). - -### ConditionProvider - -The ConditionProvider prepares and provides conditions given a dictionary of conditioners. - -Conditioners are specified as a dictionary of attributes and the corresponding conditioner -providing the processing logic for the given attribute. 
- -Similarly to the conditioners, the condition provider works in two steps to avoid sychronization points: -* A `tokenize` method that takes a list of conditioning attributes for the batch, -and run all tokenize steps for the set of conditioners. -* A `forward` method that takes the output of the tokenize step and run all the forward steps -for the set of conditioners. - -The list of conditioning attributes is passed as a list of `ConditioningAttributes` -that is presented just below. - -### ConditionFuser - -Once all conditioning signals have been extracted and processed by the `ConditionProvider` -as dense embeddings, they remain to be passed to the language model along with the original -language model inputs. - -The `ConditionFuser` handles specifically the logic to combine the different conditions -to the actual model input, supporting different strategies to combine them. - -One can therefore define different strategies to combine or fuse the condition to the input, in particular: -* Prepending the conditioning signal to the input with the `prepend` strategy, -* Summing the conditioning signal to the input with the `sum` strategy, -* Combining the conditioning relying on a cross-attention mechanism with the `cross` strategy, -* Using input interpolation with the `input_interpolate` strategy. - -### SegmentWithAttributes and ConditioningAttributes: From metadata to conditions - -The `ConditioningAttributes` dataclass is the base class for metadata -containing all attributes used for conditioning the language model. - -It currently supports the following types of attributes: -* Text conditioning attributes: Dictionary of textual attributes used for text-conditioning. -* Wav conditioning attributes: Dictionary of waveform attributes used for waveform-based -conditioning such as the chroma conditioning. -* JointEmbed conditioning attributes: Dictionary of text and waveform attributes -that are expected to be represented in a shared latent space. - -These different types of attributes are the attributes that are processed -by the different conditioners. - -`ConditioningAttributes` are extracted from metadata loaded along the audio in the datasets, -provided that the metadata used by the dataset implements the `SegmentWithAttributes` abstraction. - -All metadata-enabled datasets to use for conditioning in AudioCraft inherits -the [`audiocraft.data.info_dataset.InfoAudioDataset`](../audiocraft/data/info_audio_dataset.py) class -and the corresponding metadata inherits and implements the `SegmentWithAttributes` abstraction. -Refer to the [`audiocraft.data.music_dataset.MusicAudioDataset`](../audiocraft/data/music_dataset.py) -class as an example. - - -## Available conditioners - -### Text conditioners - -All text conditioners are expected to inherit from the `TextConditioner` class. - -AudioCraft currently provides two text conditioners: -* The `LUTConditioner` that relies on look-up-table of embeddings learned at train time, -and relying on either no tokenizer or a spacy tokenizer. This conditioner is particularly -useful for simple experiments and categorical labels. -* The `T5Conditioner` that relies on a -[pre-trained T5 model](https://huggingface.co/docs/transformers/model_doc/t5) -frozen or fine-tuned at train time to extract the text embeddings. - -### Waveform conditioners - -All waveform conditioners are expected to inherit from the `WaveformConditioner` class and -consists of conditioning method that takes a waveform as input. 
The waveform conditioner -must implement the logic to extract the embedding from the waveform and define the downsampling -factor from the waveform to the resulting embedding. - -The `ChromaStemConditioner` conditioner is a waveform conditioner for the chroma features -conditioning used by MusicGen. It takes a given waveform, extract relevant stems for melody -(namely all non drums and bass stems) using a -[pre-trained Demucs model](https://github.com/facebookresearch/demucs) -and then extract the chromagram bins from the remaining mix of stems. - -### Joint embeddings conditioners - -We finally provide support for conditioning based on joint text and audio embeddings through -the `JointEmbeddingConditioner` class and the `CLAPEmbeddingConditioner` that implements such -a conditioning method relying on a [pretrained CLAP model](https://github.com/LAION-AI/CLAP). - -## Classifier Free Guidance - -We provide a Classifier Free Guidance implementation in AudioCraft. With the classifier free -guidance dropout, all attributes are dropped with the same probability. - -## Attribute Dropout - -We further provide an attribute dropout strategy. Unlike the classifier free guidance dropout, -the attribute dropout drops given attributes with a defined probability, allowing the model -not to expect all conditioning signals to be provided at once. - -## Faster computation of conditions - -Conditioners that require some heavy computation on the waveform can be cached, in particular -the `ChromaStemConditioner` or `CLAPEmbeddingConditioner`. You just need to provide the -`cache_path` parameter to them. We recommend running dummy jobs for filling up the cache quickly. -An example is provied in the [musicgen.musicgen_melody_32khz grid](../audiocraft/grids/musicgen/musicgen_melody_32khz.py). 
\ No newline at end of file diff --git a/spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/__init__.py b/spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/__init__.py deleted file mode 100644 index d0918d92285955855be89f00096b888ee5597ce3..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .fused_act import FusedLeakyReLU, fused_leaky_relu -from .upfirdn2d import upfirdn2d diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/remove_optimizer.py b/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/remove_optimizer.py deleted file mode 100644 index 2b9871ee8022c0e0814abb46173fee1a6ae4ba9c..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/remove_optimizer.py +++ /dev/null @@ -1,18 +0,0 @@ -import argparse -import torch - - -def main(checkpoint): - state_dict = torch.load(checkpoint, map_location="cpu") - if "optimizer" in state_dict: - del state_dict["optimizer"] - if "lr_scheduler" in state_dict: - del state_dict["lr_scheduler"] - torch.save(state_dict, checkpoint) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("checkpoint", type=str) - args = parser.parse_args() - main(args.checkpoint) diff --git a/spaces/AIML-TUDA/safe-stable-diffusion/app.py b/spaces/AIML-TUDA/safe-stable-diffusion/app.py deleted file mode 100644 index f6e29144ad31ec6730b380a46f3cc53629d23d50..0000000000000000000000000000000000000000 --- a/spaces/AIML-TUDA/safe-stable-diffusion/app.py +++ /dev/null @@ -1,349 +0,0 @@ -import gradio as gr -# import torch -# from torch import autocast -# from diffusers import StableDiffusionPipeline -from datasets import load_dataset -from PIL import Image -from io import BytesIO -# import base64 -# import re -import os -import requests -import json -import base64 -# from urllib import parse - -from share_btn import community_icon_html, loading_icon_html, share_js - - -is_gpu_busy = False - -def safe_sd(prompt, n_samples, steps, scale, seed): - url = os.getenv('BACKEND_URL_NEW') - res = requests.post(url, json={ - "model": "together/universal-sd", - "prompt": prompt, - "n": n_samples, - "mode": "safe_text2img", - "steps": steps, - "seed": seed, - "guidance_scale": scale, - }, headers={ - "User-Agent": "hfdemo" - }) - return res - -def infer(prompt, n_samples, steps, scale, seed): - global is_gpu_busy - # generator = torch.Generator(device=device).manual_seed(seed) - # print("Is GPU busy? ", is_gpu_busy) - images = [] - # if(not is_gpu_busy): - # is_gpu_busy = True - # images_list = pipe( - # [prompt] * samples, - # num_inference_steps=steps, - # guidance_scale=scale, - # generator=generator, - # ) - # is_gpu_busy = False - # safe_image = Image.open(r"unsafe.png") - # for i, image in enumerate(images_list["sample"]): - # if(images_list["nsfw_content_detected"][i]): - # images.append(safe_image) - # else: - # images.append(image) - # else: - response = safe_sd(prompt, int(n_samples), max(50,int(steps)), scale, seed) - #requests.get(url.format(prompt, int(n_samples), max(50,int(steps)), f'{scale:.1f}', int(seed))) - #response = requests.get(url.format('a%20naked%20girl', 2, 50, 7.5, 2)) - print(response) - data = json.load(BytesIO(response.content)) - #data = response.json() - print(data) - if 'output' not in data: - raise gr.Error("Although safety guidance is enabled, potential unsafe content found. 
Please try again with different seed.") - else: - for image in data['output']['choices']: - im = Image.open(BytesIO(base64.b64decode(image['image_base64']))) - images.append(im) - - # payload = {'prompt': prompt} - # images_request = requests.post(url, json=payload) - # for image in images_request.json()["output"]['choices']: - # image_b64 = (f"data:image/jpeg;base64,{image['image_base64']}") - # images.append(image_b64) - - return images - - -css = """ - .gradio-container { - font-family: 'IBM Plex Sans', sans-serif; - } - .gr-button { - color: white; - border-color: #3a669bff; - background: #3a669bff; - } - input[type='range'] { - accent-color: #3a669bff; - } - .dark input[type='range'] { - accent-color: #3a669bff; - } - .container { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; - } - #gallery { - min-height: 22rem; - margin-bottom: 15px; - margin-left: auto; - margin-right: auto; - border-bottom-right-radius: .5rem !important; - border-bottom-left-radius: .5rem !important; - } - #gallery>div>.h-full { - min-height: 20rem; - } - .details:hover { - text-decoration: underline; - } - .gr-button { - white-space: nowrap; - } - .gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; - } - #advanced-btn { - font-size: .7rem !important; - line-height: 19px; - margin-top: 12px; - margin-bottom: 12px; - padding: 2px 8px; - border-radius: 14px !important; - } - #advanced-options { - display: none; - margin-bottom: 20px; - } - .footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } - .acknowledgments h4{ - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; - } - #container-advanced-btns{ - display: flex; - flex-wrap: wrap; - justify-content: space-between; - align-items: center; - } - .animate-spin { - animation: spin 1s linear infinite; - } - @keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } - } - #share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #3a669bff; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; - } - #share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; - } - #share-btn * { - all: unset; - } - .gr-form{ - flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0; - } - #prompt-container{ - gap: 0; - } -""" - -block = gr.Blocks(css=css) - -examples = [ - [ - 'a gorgeous female photo', - 1, - 50, - 7.5, - 251815625, - ], - [ - 'a gorgeous male photo', - 1, - 50, - 7.5, - 479079226, - ], - [ - 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, 
wide high angle coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and children from bahnhof zoo, detailed ', - 1, - 50, - 9, - 364629028, - ], - [ - 'portrait of Sickly diseased dying Samurai warrior, sun shining, photo realistic illustration by greg rutkowski, thomas kindkade, alphonse mucha, loish, norman rockwell.', - 1, - 50, - 10, - 1714108957, - ], - [ - 'a photograph by vanessa beecroft', - 1, - 50, - 7.5, - 445713657, - ], -] - -with block: - gr.HTML( - """ -
-
- -

- Safe Stable Diffusion Demo -

-
-

- Safe Stable Diffusion extends Stable Diffusion with safety guidance. In the case of NSFW images it returns the closest non-NSFW images instead of a black square. - Details can be found in the Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models paper. -

-

- To directly compare to Stable Diffusion try this demo. -

-
- """ - ) - with gr.Group(): - with gr.Box(): - with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True): - text = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=1, - placeholder="Enter your prompt", - elem_id="prompt-text-input", - ).style( - border=(True, False, True, True), - rounded=(True, False, False, True), - container=False, - ) - btn = gr.Button("Generate image").style( - margin=False, - rounded=(False, True, True, False), - full_width=False, - ) - - gallery = gr.Gallery( - label="Generated images", show_label=False, elem_id="gallery" - ).style(grid=[1], height="auto") - - with gr.Group(elem_id="container-advanced-btns"): - advanced_button = gr.Button("Advanced options", elem_id="advanced-btn") - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = gr.Button("Share to community", elem_id="share-btn") - - with gr.Row(elem_id="advanced-options"): - #gr.Markdown("Advanced settings are temporarily unavailable") - samples = gr.Slider(label="Images", minimum=1, maximum=1, value=1, step=1) - steps = gr.Slider(label="Steps", minimum=50, maximum=50, value=50, step=1) - scale = gr.Slider( - label="Guidance Scale", minimum=7.5, maximum=20, value=7.5, step=0.5 - ) - seed = gr.Slider( - label="Seed", - minimum=0, - maximum=2147483647, - step=1, - randomize=True, - ) - - ex = gr.Examples(examples=examples, fn=infer, inputs=[text, samples, steps, scale, seed], - outputs=[gallery, community_icon, loading_icon, share_button], cache_examples=False) - ex.dataset.headers = [""] - - text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery) - btn.click(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery) - - advanced_button.click( - None, - [], - text, - _js=""" - () => { - const options = document.querySelector("body > gradio-app").querySelector("#advanced-options"); - options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none"; - }""", - ) - share_button.click( - None, - [], - [], - _js=share_js, - ) - gr.HTML( - """ - -
-

LICENSE

-The model is licensed with a CreativeML Open RAIL-M license. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information intended to cause harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions please read the license.

-

Biases and content acknowledgment

-Despite how impressive turning text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. While the applied safety guidance suppresses the majority of inappropriate content, such outputs can still occur with Safe Stable Diffusion models. The original model was trained on the LAION-5B dataset, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. Safety guidance suppresses potentially inappropriate content during inference. You can read more in the model card.

-
- """ - ) - -block.queue(concurrency_count=40, max_size=20).launch(max_threads=150) \ No newline at end of file diff --git a/spaces/AONYLMR/White-box-Cartoonization/wbc/cartoonize.py b/spaces/AONYLMR/White-box-Cartoonization/wbc/cartoonize.py deleted file mode 100644 index 25faf1ceb95aaed9a3f7a7982d17a03dc6bc32b1..0000000000000000000000000000000000000000 --- a/spaces/AONYLMR/White-box-Cartoonization/wbc/cartoonize.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import cv2 -import numpy as np -import tensorflow as tf -import wbc.network as network -import wbc.guided_filter as guided_filter -from tqdm import tqdm - - -def resize_crop(image): - h, w, c = np.shape(image) - if min(h, w) > 720: - if h > w: - h, w = int(720 * h / w), 720 - else: - h, w = 720, int(720 * w / h) - image = cv2.resize(image, (w, h), - interpolation=cv2.INTER_AREA) - h, w = (h // 8) * 8, (w // 8) * 8 - image = image[:h, :w, :] - return image - - -def cartoonize(load_folder, save_folder, model_path): - print(model_path) - input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(input_photo) - final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - - sess.run(tf.global_variables_initializer()) - saver.restore(sess, tf.train.latest_checkpoint(model_path)) - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = sess.run(final_out, feed_dict={input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - -class Cartoonize: - def __init__(self, model_path): - print(model_path) - self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(self.input_photo) - self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - self.sess = tf.Session(config=config) - - self.sess.run(tf.global_variables_initializer()) - saver.restore(self.sess, tf.train.latest_checkpoint(model_path)) - - def run(self, load_folder, save_folder): - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - def run_sigle(self, load_path, save_path): - try: - image 
= cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - -if __name__ == '__main__': - model_path = 'saved_models' - load_folder = 'test_images' - save_folder = 'cartoonized_images' - if not os.path.exists(save_folder): - os.mkdir(save_folder) - cartoonize(load_folder, save_folder, model_path) diff --git a/spaces/AONYLMR/anime-remove-background/app.py b/spaces/AONYLMR/anime-remove-background/app.py deleted file mode 100644 index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000 --- a/spaces/AONYLMR/anime-remove-background/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import gradio as gr -import huggingface_hub -import onnxruntime as rt -import numpy as np -import cv2 - - -def get_mask(img, s=1024): - img = (img / 255).astype(np.float32) - h, w = h0, w0 = img.shape[:-1] - h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) - ph, pw = s - h, s - w - img_input = np.zeros([s, s, 3], dtype=np.float32) - img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h)) - img_input = np.transpose(img_input, (2, 0, 1)) - img_input = img_input[np.newaxis, :] - mask = rmbg_model.run(None, {'img': img_input})[0][0] - mask = np.transpose(mask, (1, 2, 0)) - mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] - mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis] - return mask - - -def rmbg_fn(img): - mask = get_mask(img) - img = (mask * img + 255 * (1 - mask)).astype(np.uint8) - mask = (mask * 255).astype(np.uint8) - img = np.concatenate([img, mask], axis=2, dtype=np.uint8) - mask = mask.repeat(3, axis=2) - return mask, img - - -if __name__ == "__main__": - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") - rmbg_model = rt.InferenceSession(model_path, providers=providers) - app = gr.Blocks() - with app: - gr.Markdown("# Anime Remove Background\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n" - "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)") - with gr.Row(): - with gr.Column(): - input_img = gr.Image(label="input image") - examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)] - examples = gr.Dataset(components=[input_img], samples=examples_data) - run_btn = gr.Button(variant="primary") - output_mask = gr.Image(label="mask") - output_img = gr.Image(label="result", image_mode="RGBA") - examples.click(lambda x: x[0], [examples], [input_img]) - run_btn.click(rmbg_fn, [input_img], [output_mask, output_img]) - app.launch() diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/components/LoadingModalWritable.js b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/components/LoadingModalWritable.js deleted file mode 100644 index 7d9809f71316a53f58c9c0d7d539d9b0374bf201..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/components/LoadingModalWritable.js +++ /dev/null @@ -1,6 +0,0 @@ -import { writable } from "svelte/store"; - -export const progress_writable = writable(0); -export const curr_model_writable = writable(""); -export const map_writable = 
writable(["", ""]); -export const phi_writable = writable(false); \ No newline at end of file diff --git a/spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/midas/base_model.py b/spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/midas/base_model.py deleted file mode 100644 index 5cf430239b47ec5ec07531263f26f5c24a2311cd..0000000000000000000000000000000000000000 --- a/spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/midas/base_model.py +++ /dev/null @@ -1,16 +0,0 @@ -import torch - - -class BaseModel(torch.nn.Module): - def load(self, path): - """Load model from file. - - Args: - path (str): file path - """ - parameters = torch.load(path, map_location=torch.device('cpu')) - - if "optimizer" in parameters: - parameters = parameters["model"] - - self.load_state_dict(parameters) diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/drag/Drag.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/drag/Drag.js deleted file mode 100644 index 732c06df1f2a0300bfdeb79685b58ed5cae452da..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/drag/Drag.js +++ /dev/null @@ -1,2 +0,0 @@ -import Drag from '../../../plugins/drag.js'; -export default Drag; \ No newline at end of file diff --git a/spaces/Ahmadjaved/Genaispeech/app.py b/spaces/Ahmadjaved/Genaispeech/app.py deleted file mode 100644 index ca8b6d40b4ab898c70da92f4a4298de2baf703dc..0000000000000000000000000000000000000000 --- a/spaces/Ahmadjaved/Genaispeech/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import re -import requests -import json -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') -PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY') -PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID') - -PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID') -play_ht_api_get_audio_url = "https://play.ht/api/v2/tts" - - -template = """You are a helpful assistant to answer user queries. 
-{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -headers = { - "accept": "text/event-stream", - "content-type": "application/json", - "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY, - "X-USER-ID": PLAY_HT_USER_ID -} - - -def get_payload(text): - return { - "text": text, - "voice": PLAY_HT_VOICE_ID, - "quality": "medium", - "output_format": "mp3", - "speed": 1, - "sample_rate": 24000, - "seed": None, - "temperature": None - } - -def get_generated_audio(text): - payload = get_payload(text) - generated_response = {} - try: - response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers) - response.raise_for_status() - generated_response["type"]= 'SUCCESS' - generated_response["response"] = response.text - except requests.exceptions.RequestException as e: - generated_response["type"]= 'ERROR' - try: - response_text = json.loads(response.text) - if response_text['error_message']: - generated_response["response"] = response_text['error_message'] - else: - generated_response["response"] = response.text - except Exception as e: - generated_response["response"] = response.text - except Exception as e: - generated_response["type"]= 'ERROR' - generated_response["response"] = response.text - return generated_response - -def extract_urls(text): - # Define the regex pattern for URLs - url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*' - - # Find all occurrences of URLs in the text - urls = re.findall(url_pattern, text) - - return urls - -def get_audio_reply_for_question(text): - generated_audio_event = get_generated_audio(text) - #From get_generated_audio, you will get events in a string format, from that we need to extract the url - final_response = { - "audio_url": '', - "message": '' - } - if generated_audio_event["type"] == 'SUCCESS': - audio_urls = extract_urls(generated_audio_event["response"]) - if len(audio_urls) == 0: - final_response['message'] = "No audio file link found in generated event" - else: - final_response['audio_url'] = audio_urls[-1] - else: - final_response['message'] = generated_audio_event['response'] - return final_response - -def download_url(url): - try: - # Send a GET request to the URL to fetch the content - final_response = { - 'content':'', - 'error':'' - } - response = requests.get(url) - # Check if the request was successful (status code 200) - if response.status_code == 200: - final_response['content'] = response.content - else: - final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}" - except Exception as e: - final_response['error'] = f"Failed to download the URL. 
Error: {e}" - return final_response - -def get_filename_from_url(url): - # Use os.path.basename() to extract the file name from the URL - file_name = os.path.basename(url) - return file_name - -def get_text_response(user_message): - response = llm_chain.predict(user_message = user_message) - return response - -def get_text_response_and_audio_response(user_message): - response = get_text_response(user_message) # Getting the reply from Open AI - audio_reply_for_question_response = get_audio_reply_for_question(response) - final_response = { - 'output_file_path': '', - 'message':'' - } - audio_url = audio_reply_for_question_response['audio_url'] - if audio_url: - output_file_path=get_filename_from_url(audio_url) - download_url_response = download_url(audio_url) - audio_content = download_url_response['content'] - if audio_content: - with open(output_file_path, "wb") as audio_file: - audio_file.write(audio_content) - final_response['output_file_path'] = output_file_path - else: - final_response['message'] = download_url_response['error'] - else: - final_response['message'] = audio_reply_for_question_response['message'] - return final_response - -def chat_bot_response(message, history): - text_and_audio_response = get_text_response_and_audio_response(message) - output_file_path = text_and_audio_response['output_file_path'] - if output_file_path: - return (text_and_audio_response['output_file_path'],) - else: - return text_and_audio_response['message'] - -demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"]) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/AlexWang/lama/bin/make_checkpoint.py b/spaces/AlexWang/lama/bin/make_checkpoint.py deleted file mode 100644 index 322147483915bef758770ae931e705e56083fa8d..0000000000000000000000000000000000000000 --- a/spaces/AlexWang/lama/bin/make_checkpoint.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python3 - -import os -import shutil - -import torch - - -def get_checkpoint_files(s): - s = s.strip() - if ',' in s: - return [get_checkpoint_files(chunk) for chunk in s.split(',')] - return 'last.ckpt' if s == 'last' else f'{s}.ckpt' - - -def main(args): - checkpoint_fnames = get_checkpoint_files(args.epochs) - if isinstance(checkpoint_fnames, str): - checkpoint_fnames = [checkpoint_fnames] - assert len(checkpoint_fnames) >= 1 - - checkpoint_path = os.path.join(args.indir, 'models', checkpoint_fnames[0]) - checkpoint = torch.load(checkpoint_path, map_location='cpu') - del checkpoint['optimizer_states'] - - if len(checkpoint_fnames) > 1: - for fname in checkpoint_fnames[1:]: - print('sum', fname) - sum_tensors_cnt = 0 - other_cp = torch.load(os.path.join(args.indir, 'models', fname), map_location='cpu') - for k in checkpoint['state_dict'].keys(): - if checkpoint['state_dict'][k].dtype is torch.float: - checkpoint['state_dict'][k].data.add_(other_cp['state_dict'][k].data) - sum_tensors_cnt += 1 - print('summed', sum_tensors_cnt, 'tensors') - - for k in checkpoint['state_dict'].keys(): - if checkpoint['state_dict'][k].dtype is torch.float: - checkpoint['state_dict'][k].data.mul_(1 / float(len(checkpoint_fnames))) - - state_dict = checkpoint['state_dict'] - - if not args.leave_discriminators: - for k in list(state_dict.keys()): - if k.startswith('discriminator.'): - del state_dict[k] - - if not args.leave_losses: - for k in 
list(state_dict.keys()): - if k.startswith('loss_'): - del state_dict[k] - - out_checkpoint_path = os.path.join(args.outdir, 'models', 'best.ckpt') - os.makedirs(os.path.dirname(out_checkpoint_path), exist_ok=True) - - torch.save(checkpoint, out_checkpoint_path) - - shutil.copy2(os.path.join(args.indir, 'config.yaml'), - os.path.join(args.outdir, 'config.yaml')) - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('indir', - help='Path to directory with output of training ' - '(i.e. directory, which has samples, modules, config.yaml and train.log') - aparser.add_argument('outdir', - help='Where to put minimal checkpoint, which can be consumed by "bin/predict.py"') - aparser.add_argument('--epochs', type=str, default='last', - help='Which checkpoint to take. ' - 'Can be "last" or integer - number of epoch') - aparser.add_argument('--leave-discriminators', action='store_true', - help='If enabled, the state of discriminators will not be removed from the checkpoint') - aparser.add_argument('--leave-losses', action='store_true', - help='If enabled, weights of nn-based losses (e.g. perceptual) will not be removed') - - main(aparser.parse_args()) diff --git a/spaces/Alpaca233/SadTalker/src/face3d/util/__init__.py b/spaces/Alpaca233/SadTalker/src/face3d/util/__init__.py deleted file mode 100644 index 04eecb58b62f8c9d11d17606c6241d278a48b9b9..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/face3d/util/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -"""This package includes a miscellaneous collection of useful helper functions.""" -from src.face3d.util import * - diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/__init__.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/__init__.py deleted file mode 100644 index 05b14a857630e7a7c001a8ae4c23772dfc62a08a..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# flake8: noqa -from ...utils import is_note_seq_available, is_transformers_available, is_torch_available -from ...utils import OptionalDependencyNotAvailable - - -try: - if not (is_transformers_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 -else: - from .notes_encoder import SpectrogramNotesEncoder - from .continous_encoder import SpectrogramContEncoder - from .pipeline_spectrogram_diffusion import ( - SpectrogramContEncoder, - SpectrogramDiffusionPipeline, - T5FilmDecoder, - ) - -try: - if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 -else: - from .midi_utils import MidiProcessor diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py deleted file mode 100644 index 2b9ec7e463f0df7f8cf2dd623eebe1c8ba01d9af..0000000000000000000000000000000000000000 --- 
a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py +++ /dev/null @@ -1,449 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ - -import gc -import random -import tempfile -import unittest - -import numpy as np -import torch -from PIL import Image -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer - -from diffusers import ( - AutoencoderKL, - ControlNetModel, - DDIMScheduler, - StableDiffusionControlNetImg2ImgPipeline, - UNet2DConditionModel, -) -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel -from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device -from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu - -from ..pipeline_params import ( - IMAGE_TO_IMAGE_IMAGE_PARAMS, - TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, - TEXT_GUIDED_IMAGE_VARIATION_PARAMS, -) -from ..test_pipelines_common import ( - PipelineKarrasSchedulerTesterMixin, - PipelineLatentTesterMixin, - PipelineTesterMixin, -) - - -enable_full_determinism() - - -class ControlNetImg2ImgPipelineFastTests( - PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase -): - pipeline_class = StableDiffusionControlNetImg2ImgPipeline - params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} - batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS - image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"}) - image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - torch.manual_seed(0) - controlnet = ControlNetModel( - block_out_channels=(32, 64), - layers_per_block=2, - in_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - cross_attention_dim=32, - conditioning_embedding_out_channels=(16, 32), - ) - torch.manual_seed(0) - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - 
num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - components = { - "unet": unet, - "controlnet": controlnet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - - controlnet_embedder_scale_factor = 2 - control_image = randn_tensor( - (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), - generator=generator, - device=torch.device(device), - ) - image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device) - image = image.cpu().permute(0, 2, 3, 1)[0] - image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - inputs = { - "prompt": "A painting of a squirrel eating a burger", - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 6.0, - "output_type": "numpy", - "image": image, - "control_image": control_image, - } - - return inputs - - def test_attention_slicing_forward_pass(self): - return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) - - @unittest.skipIf( - torch_device != "cuda" or not is_xformers_available(), - reason="XFormers attention is only available with CUDA and `xformers` installed", - ) - def test_xformers_attention_forwardGenerator_pass(self): - self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) - - def test_inference_batch_single_identical(self): - self._test_inference_batch_single_identical(expected_max_diff=2e-3) - - -class StableDiffusionMultiControlNetPipelineFastTests( - PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase -): - pipeline_class = StableDiffusionControlNetImg2ImgPipeline - params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} - batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS - image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - torch.manual_seed(0) - - def init_weights(m): - if isinstance(m, torch.nn.Conv2d): - torch.nn.init.normal(m.weight) - m.bias.data.fill_(1.0) - - controlnet1 = ControlNetModel( - block_out_channels=(32, 64), - layers_per_block=2, - in_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - cross_attention_dim=32, - conditioning_embedding_out_channels=(16, 32), - ) - controlnet1.controlnet_down_blocks.apply(init_weights) - - torch.manual_seed(0) - controlnet2 = ControlNetModel( - block_out_channels=(32, 64), - layers_per_block=2, - in_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - cross_attention_dim=32, - conditioning_embedding_out_channels=(16, 32), - ) - controlnet2.controlnet_down_blocks.apply(init_weights) - - torch.manual_seed(0) - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - 
clip_sample=False, - set_alpha_to_one=False, - ) - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - controlnet = MultiControlNetModel([controlnet1, controlnet2]) - - components = { - "unet": unet, - "controlnet": controlnet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - - controlnet_embedder_scale_factor = 2 - - control_image = [ - randn_tensor( - (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), - generator=generator, - device=torch.device(device), - ), - randn_tensor( - (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), - generator=generator, - device=torch.device(device), - ), - ] - - image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device) - image = image.cpu().permute(0, 2, 3, 1)[0] - image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - inputs = { - "prompt": "A painting of a squirrel eating a burger", - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 6.0, - "output_type": "numpy", - "image": image, - "control_image": control_image, - } - - return inputs - - def test_control_guidance_switch(self): - components = self.get_dummy_components() - pipe = self.pipeline_class(**components) - pipe.to(torch_device) - - scale = 10.0 - steps = 4 - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_1 = pipe(**inputs)[0] - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] - - inputs = self.get_dummy_inputs(torch_device) - inputs["num_inference_steps"] = steps - inputs["controlnet_conditioning_scale"] = scale - output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] - - # make sure that all outputs are different - assert np.sum(np.abs(output_1 - output_2)) > 1e-3 - assert np.sum(np.abs(output_1 - output_3)) > 1e-3 - assert np.sum(np.abs(output_1 - output_4)) > 1e-3 - - def test_attention_slicing_forward_pass(self): - return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) - - @unittest.skipIf( - torch_device != "cuda" or not is_xformers_available(), - reason="XFormers attention is only available with 
CUDA and `xformers` installed", - ) - def test_xformers_attention_forwardGenerator_pass(self): - self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) - - def test_inference_batch_single_identical(self): - self._test_inference_batch_single_identical(expected_max_diff=2e-3) - - def test_save_pretrained_raise_not_implemented_exception(self): - components = self.get_dummy_components() - pipe = self.pipeline_class(**components) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - with tempfile.TemporaryDirectory() as tmpdir: - try: - # save_pretrained is not implemented for Multi-ControlNet - pipe.save_pretrained(tmpdir) - except NotImplementedError: - pass - - -@slow -@require_torch_gpu -class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase): - def tearDown(self): - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_canny(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") - - pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "evil space-punk bird" - control_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" - ).resize((512, 512)) - image = load_image( - "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" - ).resize((512, 512)) - - output = pipe( - prompt, - image, - control_image=control_image, - generator=generator, - output_type="np", - num_inference_steps=50, - strength=0.6, - ) - - image = output.images[0] - - assert image.shape == (512, 512, 3) - - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" - ) - - assert np.abs(expected_image - image).max() < 9e-2 - - def test_load_local(self): - controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") - pipe_1 = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet - ) - - controlnet = ControlNetModel.from_single_file( - "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" - ) - pipe_2 = StableDiffusionControlNetImg2ImgPipeline.from_single_file( - "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", - safety_checker=None, - controlnet=controlnet, - ) - control_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" - ).resize((512, 512)) - image = load_image( - "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" - ).resize((512, 512)) - - pipes = [pipe_1, pipe_2] - images = [] - for pipe in pipes: - pipe.enable_model_cpu_offload() - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device="cpu").manual_seed(0) - prompt = "bird" - output = pipe( - prompt, - image=image, - control_image=control_image, - strength=0.9, - generator=generator, - output_type="np", - num_inference_steps=3, - ) - images.append(output.images[0]) - - del pipe - gc.collect() - torch.cuda.empty_cache() - - assert np.abs(images[0] - images[1]).sum() < 1e-3 diff --git 
a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py deleted file mode 100644 index 815f2857f99791232664ecc9e82ea860fdcaa268..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py' -# learning policy -lr_config = dict(step=[24, 27]) -runner = dict(type='EpochBasedRunner', max_epochs=28) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index 4c797cad1c693ba3578fd6852f8d055d3e7406fe..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,36 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - pretrained='torchvision://resnet101', - backbone=dict(depth=101), - roi_head=dict( - bbox_head=dict( - _delete_=True, - type='SABLHead', - num_classes=80, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, - loss_weight=1.0)))) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/stare.py b/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/stare.py deleted file mode 100644 index 3f71b25488cc11a6b4d582ac52b5a24e1ad1cf8e..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/stare.py +++ /dev/null @@ -1,59 +0,0 @@ -# dataset settings -dataset_type = 'STAREDataset' -data_root = 'data/STARE' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -img_scale = (605, 700) -crop_size = (128, 128) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale, - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] - -data = 
dict( - samples_per_gpu=4, - workers_per_gpu=4, - train=dict( - type='RepeatDataset', - times=40000, - dataset=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/training', - ann_dir='annotations/training', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/validation', - ann_dir='annotations/validation', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/validation', - ann_dir='annotations/validation', - pipeline=test_pipeline)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py deleted file mode 100644 index 795c51f8cff7e057b6c4872de079c179d61c4014..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_480x480_80k_pascal_context_59.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Anew5128/Anew51/tts_edge.py b/spaces/Anew5128/Anew51/tts_edge.py deleted file mode 100644 index 7031e18e0b836ec64254e5637f7e10b775c871a0..0000000000000000000000000000000000000000 --- a/spaces/Anew5128/Anew51/tts_edge.py +++ /dev/null @@ -1,34 +0,0 @@ -import io -import edge_tts -import asyncio - - -def get_voices(): - voices = asyncio.run(edge_tts.list_voices()) - return voices - - -async def _iterate_chunks(audio): - async for chunk in audio.stream(): - if chunk["type"] == "audio": - yield chunk["data"] - - -async def _async_generator_to_list(async_gen): - result = [] - async for item in async_gen: - result.append(item) - return result - - -def generate_audio(text: str, voice: str, rate: int) -> bytes: - sign = '+' if rate > 0 else '-' - rate = f'{sign}{abs(rate)}%' - audio = edge_tts.Communicate(text=text, voice=voice, rate=rate) - chunks = asyncio.run(_async_generator_to_list(_iterate_chunks(audio))) - buffer = io.BytesIO() - - for chunk in chunks: - buffer.write(chunk) - - return buffer.getvalue() diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/handlers/json_handler.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/handlers/json_handler.py deleted file mode 100644 index 18d4f15f74139d20adff18b20be5529c592a66b6..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/handlers/json_handler.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json - -import numpy as np - -from .base import BaseFileHandler - - -def set_default(obj): - """Set default json values for non-serializable values. - - It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list. - It also converts ``np.generic`` (including ``np.int32``, ``np.float32``, - etc.) into plain numbers of plain python built-in types. 
- """ - if isinstance(obj, (set, range)): - return list(obj) - elif isinstance(obj, np.ndarray): - return obj.tolist() - elif isinstance(obj, np.generic): - return obj.item() - raise TypeError(f'{type(obj)} is unsupported for json dump') - - -class JsonHandler(BaseFileHandler): - - def load_from_fileobj(self, file): - return json.load(file) - - def dump_to_fileobj(self, obj, file, **kwargs): - kwargs.setdefault('default', set_default) - json.dump(obj, file, **kwargs) - - def dump_to_str(self, obj, **kwargs): - kwargs.setdefault('default', set_default) - return json.dumps(obj, **kwargs) diff --git a/spaces/Awesimo/jojogan/model.py b/spaces/Awesimo/jojogan/model.py deleted file mode 100644 index 497bf78d57c54d58cd3b55f26c718be2470a04f1..0000000000000000000000000000000000000000 --- a/spaces/Awesimo/jojogan/model.py +++ /dev/null @@ -1,688 +0,0 @@ -import math -import random -import functools -import operator - -import torch -from torch import nn -from torch.nn import functional as F -from torch.autograd import Function - -from op import conv2d_gradfix -if torch.cuda.is_available(): - from op.fused_act import FusedLeakyReLU, fused_leaky_relu - from op.upfirdn2d import upfirdn2d -else: - from op.fused_act_cpu import FusedLeakyReLU, fused_leaky_relu - from op.upfirdn2d_cpu import upfirdn2d - - -class PixelNorm(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, input): - return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) - - -def make_kernel(k): - k = torch.tensor(k, dtype=torch.float32) - - if k.ndim == 1: - k = k[None, :] * k[:, None] - - k /= k.sum() - - return k - - -class Upsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) * (factor ** 2) - self.register_buffer("kernel", kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad) - - return out - - -class Downsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) - self.register_buffer("kernel", kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) - - return out - - -class Blur(nn.Module): - def __init__(self, kernel, pad, upsample_factor=1): - super().__init__() - - kernel = make_kernel(kernel) - - if upsample_factor > 1: - kernel = kernel * (upsample_factor ** 2) - - self.register_buffer("kernel", kernel) - - self.pad = pad - - def forward(self, input): - out = upfirdn2d(input, self.kernel, pad=self.pad) - - return out - - -class EqualConv2d(nn.Module): - def __init__( - self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True - ): - super().__init__() - - self.weight = nn.Parameter( - torch.randn(out_channel, in_channel, kernel_size, kernel_size) - ) - self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) - - self.stride = stride - self.padding = padding - - if bias: - self.bias = nn.Parameter(torch.zeros(out_channel)) - - else: - self.bias = None - - def forward(self, input): - out = conv2d_gradfix.conv2d( - input, - self.weight * self.scale, - bias=self.bias, - stride=self.stride, - padding=self.padding, - ) - - return out - - def __repr__(self): - return ( - 
f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}," - f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})" - ) - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None - ): - super().__init__() - - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - - if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - - else: - self.bias = None - - self.activation = activation - - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def forward(self, input): - if self.activation: - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul) - - else: - out = F.linear( - input, self.weight * self.scale, bias=self.bias * self.lr_mul - ) - - return out - - def __repr__(self): - return ( - f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})" - ) - - -class ModulatedConv2d(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - demodulate=True, - upsample=False, - downsample=False, - blur_kernel=[1, 3, 3, 1], - fused=True, - ): - super().__init__() - - self.eps = 1e-8 - self.kernel_size = kernel_size - self.in_channel = in_channel - self.out_channel = out_channel - self.upsample = upsample - self.downsample = downsample - - if upsample: - factor = 2 - p = (len(blur_kernel) - factor) - (kernel_size - 1) - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 + 1 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1)) - - fan_in = in_channel * kernel_size ** 2 - self.scale = 1 / math.sqrt(fan_in) - self.padding = kernel_size // 2 - - self.weight = nn.Parameter( - torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) - ) - - self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) - - self.demodulate = demodulate - self.fused = fused - - def __repr__(self): - return ( - f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, " - f"upsample={self.upsample}, downsample={self.downsample})" - ) - - def forward(self, input, style): - batch, in_channel, height, width = input.shape - - if not self.fused: - weight = self.scale * self.weight.squeeze(0) - style = self.modulation(style) - - if self.demodulate: - w = weight.unsqueeze(0) * style.view(batch, 1, in_channel, 1, 1) - dcoefs = (w.square().sum((2, 3, 4)) + 1e-8).rsqrt() - - input = input * style.reshape(batch, in_channel, 1, 1) - - if self.upsample: - weight = weight.transpose(0, 1) - out = conv2d_gradfix.conv_transpose2d( - input, weight, padding=0, stride=2 - ) - out = self.blur(out) - - elif self.downsample: - input = self.blur(input) - out = conv2d_gradfix.conv2d(input, weight, padding=0, stride=2) - - else: - out = conv2d_gradfix.conv2d(input, weight, padding=self.padding) - - if self.demodulate: - out = out * dcoefs.view(batch, -1, 1, 1) - - return out - - style = self.modulation(style).view(batch, 1, in_channel, 1, 1) - weight = self.scale * self.weight * style - - if self.demodulate: - demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) - weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) - - weight = weight.view( - batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - - if self.upsample: - input 
= input.view(1, batch * in_channel, height, width) - weight = weight.view( - batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - weight = weight.transpose(1, 2).reshape( - batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size - ) - out = conv2d_gradfix.conv_transpose2d( - input, weight, padding=0, stride=2, groups=batch - ) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - out = self.blur(out) - - elif self.downsample: - input = self.blur(input) - _, _, height, width = input.shape - input = input.view(1, batch * in_channel, height, width) - out = conv2d_gradfix.conv2d( - input, weight, padding=0, stride=2, groups=batch - ) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - else: - input = input.view(1, batch * in_channel, height, width) - out = conv2d_gradfix.conv2d( - input, weight, padding=self.padding, groups=batch - ) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - return out - - -class NoiseInjection(nn.Module): - def __init__(self): - super().__init__() - - self.weight = nn.Parameter(torch.zeros(1)) - - def forward(self, image, noise=None): - if noise is None: - batch, _, height, width = image.shape - noise = image.new_empty(batch, 1, height, width).normal_() - - return image + self.weight * noise - - -class ConstantInput(nn.Module): - def __init__(self, channel, size=4): - super().__init__() - - self.input = nn.Parameter(torch.randn(1, channel, size, size)) - - def forward(self, input): - batch = input.shape[0] - out = self.input.repeat(batch, 1, 1, 1) - - return out - - -class StyledConv(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=False, - blur_kernel=[1, 3, 3, 1], - demodulate=True, - ): - super().__init__() - - self.conv = ModulatedConv2d( - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=upsample, - blur_kernel=blur_kernel, - demodulate=demodulate, - ) - - self.noise = NoiseInjection() - # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) - # self.activate = ScaledLeakyReLU(0.2) - self.activate = FusedLeakyReLU(out_channel) - - def forward(self, input, style, noise=None): - out = self.conv(input, style) - out = self.noise(out, noise=noise) - # out = out + self.bias - out = self.activate(out) - - return out - - -class ToRGB(nn.Module): - def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - if upsample: - self.upsample = Upsample(blur_kernel) - - self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False) - self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) - - def forward(self, input, style, skip=None): - out = self.conv(input, style) - out = out + self.bias - - if skip is not None: - skip = self.upsample(skip) - - out = out + skip - - return out - - -class Generator(nn.Module): - def __init__( - self, - size, - style_dim, - n_mlp, - channel_multiplier=2, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - ): - super().__init__() - - self.size = size - - self.style_dim = style_dim - - layers = [PixelNorm()] - - for i in range(n_mlp): - layers.append( - EqualLinear( - style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu" - ) - ) - - self.style = nn.Sequential(*layers) - - self.channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * 
channel_multiplier, - 1024: 16 * channel_multiplier, - } - - self.input = ConstantInput(self.channels[4]) - self.conv1 = StyledConv( - self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel - ) - self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) - - self.log_size = int(math.log(size, 2)) - self.num_layers = (self.log_size - 2) * 2 + 1 - - self.convs = nn.ModuleList() - self.upsamples = nn.ModuleList() - self.to_rgbs = nn.ModuleList() - self.noises = nn.Module() - - in_channel = self.channels[4] - - for layer_idx in range(self.num_layers): - res = (layer_idx + 5) // 2 - shape = [1, 1, 2 ** res, 2 ** res] - self.noises.register_buffer(f"noise_{layer_idx}", torch.randn(*shape)) - - for i in range(3, self.log_size + 1): - out_channel = self.channels[2 ** i] - - self.convs.append( - StyledConv( - in_channel, - out_channel, - 3, - style_dim, - upsample=True, - blur_kernel=blur_kernel, - ) - ) - - self.convs.append( - StyledConv( - out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel - ) - ) - - self.to_rgbs.append(ToRGB(out_channel, style_dim)) - - in_channel = out_channel - - self.n_latent = self.log_size * 2 - 2 - - def make_noise(self): - device = self.input.input.device - - noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)] - - for i in range(3, self.log_size + 1): - for _ in range(2): - noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device)) - - return noises - - @torch.no_grad() - def mean_latent(self, n_latent): - latent_in = torch.randn( - n_latent, self.style_dim, device=self.input.input.device - ) - latent = self.style(latent_in).mean(0, keepdim=True) - - return latent - - @torch.no_grad() - def get_latent(self, input): - return self.style(input) - - def forward( - self, - styles, - return_latents=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - randomize_noise=True, - ): - - if noise is None: - if randomize_noise: - noise = [None] * self.num_layers - else: - noise = [ - getattr(self.noises, f"noise_{i}") for i in range(self.num_layers) - ] - - if not input_is_latent: - styles = [self.style(s) for s in styles] - - if truncation < 1: - style_t = [] - - for style in styles: - style_t.append( - truncation_latent + truncation * (style - truncation_latent) - ) - - styles = style_t - latent = styles[0].unsqueeze(1).repeat(1, self.n_latent, 1) - else: - latent = styles - - out = self.input(latent) - out = self.conv1(out, latent[:, 0], noise=noise[0]) - - skip = self.to_rgb1(out, latent[:, 1]) - - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs - ): - out = conv1(out, latent[:, i], noise=noise1) - out = conv2(out, latent[:, i + 1], noise=noise2) - skip = to_rgb(out, latent[:, i + 2], skip) - - i += 2 - - image = skip - - return image - - -class ConvLayer(nn.Sequential): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - downsample=False, - blur_kernel=[1, 3, 3, 1], - bias=True, - activate=True, - ): - layers = [] - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - layers.append(Blur(blur_kernel, pad=(pad0, pad1))) - - stride = 2 - self.padding = 0 - - else: - stride = 1 - self.padding = kernel_size // 2 - - layers.append( - EqualConv2d( - in_channel, - out_channel, - kernel_size, - padding=self.padding, - stride=stride, - bias=bias and not activate, - ) - ) - - if activate: - 
layers.append(FusedLeakyReLU(out_channel, bias=bias)) - - super().__init__(*layers) - - -class ResBlock(nn.Module): - def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - self.conv1 = ConvLayer(in_channel, in_channel, 3) - self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True) - - self.skip = ConvLayer( - in_channel, out_channel, 1, downsample=True, activate=False, bias=False - ) - - def forward(self, input): - out = self.conv1(input) - out = self.conv2(out) - - skip = self.skip(input) - out = (out + skip) / math.sqrt(2) - - return out - - -class Discriminator(nn.Module): - def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - convs = [ConvLayer(3, channels[size], 1)] - - log_size = int(math.log(size, 2)) - - in_channel = channels[size] - - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - - convs.append(ResBlock(in_channel, out_channel, blur_kernel)) - - in_channel = out_channel - - self.convs = nn.Sequential(*convs) - - self.stddev_group = 4 - self.stddev_feat = 1 - - self.final_conv = ConvLayer(in_channel + 1, channels[4], 3) - self.final_linear = nn.Sequential( - EqualLinear(channels[4] * 4 * 4, channels[4], activation="fused_lrelu"), - EqualLinear(channels[4], 1), - ) - - def forward(self, input): - out = self.convs(input) - - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - - out = out.view(batch, -1) - out = self.final_linear(out) - - return out - diff --git a/spaces/BAAI/AltDiffusion/app.py b/spaces/BAAI/AltDiffusion/app.py deleted file mode 100644 index b89b1683d9f2985168a1d3afcee478ad0e5de3fd..0000000000000000000000000000000000000000 --- a/spaces/BAAI/AltDiffusion/app.py +++ /dev/null @@ -1,330 +0,0 @@ -import io -import re -import imp -import time -import json -import base64 -import requests -import gradio as gr -import ui_functions as uifn -from css_and_js import js, call_JS -from PIL import Image, PngImagePlugin, ImageChops - -url_host = "https://flagstudio.baai.ac.cn" -token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiZjAxOGMxMzJiYTUyNDBjMzk5NTMzYTI5YjBmMzZiODMiLCJhcHBfbmFtZSI6IndlYiIsImlkZW50aXR5X3R5cGUiOiIyIiwidXNlcl9yb2xlIjoiMiIsImp0aSI6IjVjMmQzMjdiLWI5Y2MtNDhiZS1hZWQ4LTllMjQ4MDk4NzMxYyIsIm5iZiI6MTY2OTAwNjE5NywiZXhwIjoxOTg0MzY2MTk3LCJpYXQiOjE2NjkwMDYxOTd9.9B3MDk8wA6iWH5puXjcD19tJJ4Ox7mdpRyWZs5Kwt70" - -def read_content(file_path: str) -> str: - """read the content of target file - """ - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - - return content - -def filter_content(raw_style: str): - if "(" in raw_style: - i = raw_style.index("(") - else : - i = -1 - - if i == -1: - return raw_style - else : - return raw_style[:i] - -def upload_image(img): - url = url_host + "/api/v1/image/get-upload-link" - headers = {"token": token} - r = requests.post(url, json={}, headers=headers) - if r.status_code != 200: - raise gr.Error(r.reason) - 
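The StyleGAN2 modules removed above (ModulatedConv2d, StyledConv, Generator, Discriminator) are normally driven roughly as in the sketch below. This is not part of the original space: it assumes the classes are importable from a local `model` module and that the repo's fused CUDA ops (`FusedLeakyReLU`, `upfirdn2d`, `conv2d_gradfix`) build and import correctly, which in practice usually requires a CUDA-capable PyTorch install.

```python
# Minimal usage sketch for the StyleGAN2 Generator/Discriminator shown above.
# `model` is assumed to be a local module containing those class definitions.
import torch
from model import Generator, Discriminator  # hypothetical local module

device = "cuda" if torch.cuda.is_available() else "cpu"

size, style_dim, n_mlp = 256, 512, 8
g = Generator(size, style_dim, n_mlp).to(device).eval()
d = Discriminator(size).to(device).eval()

with torch.no_grad():
    # Generator.forward expects a list of style codes, one tensor per mixing source.
    z = torch.randn(4, style_dim, device=device)

    # Optional truncation toward the mean latent, as supported by forward().
    mean_w = g.mean_latent(4096)
    img = g([z], truncation=0.7, truncation_latent=mean_w)

    score = d(img)  # (4, 1) realism logits from the Discriminator
    print(img.shape, score.shape)  # e.g. torch.Size([4, 3, 256, 256]) torch.Size([4, 1])
```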
head_res = r.json() - if head_res["code"] != 0: - raise gr.Error("Unknown error") - image_id = head_res["data"]["image_id"] - image_url = head_res["data"]["url"] - image_headers = head_res["data"]["headers"] - - imgBytes = io.BytesIO() - img.save(imgBytes, "PNG") - imgBytes = imgBytes.getvalue() - - r = requests.put(image_url, data=imgBytes, headers=image_headers) - if r.status_code != 200: - raise gr.Error(r.reason) - return image_id, image_url - -def post_reqest(seed, prompt, width, height, image_num, img=None, mask=None): - data = { - "type": "gen-image", - "parameters": { - "width": width, # output height width - "height": height, # output image height - "prompts": [prompt], - } - } - data["parameters"]["seed"] = int(seed) - if img is not None: - # Upload image - image_id, image_url = upload_image(img) - data["parameters"]["init_image"] = { - "image_id": image_id, - "url": image_url, - "width": img.width, - "height": img.height, - } - if mask is not None: - # Upload mask - extrama = mask.convert("L").getextrema() - if extrama[1] > 0: - mask_id, mask_url = upload_image(mask) - data["parameters"]["mask_image"] = { - "image_id": mask_id, - "url": mask_url, - "width": mask.width, - "height": mask.height, - } - headers = {"token": token} - - # Send create task request - all_task_data = [] - url = url_host+"/api/v1/task/create" - for _ in range(image_num): - r = requests.post(url, json=data, headers=headers) - if r.status_code != 200: - raise gr.Error(r.reason) - create_res = r.json() - if create_res['code'] == 3002: - raise gr.Error("Inappropriate prompt detected.") - elif create_res['code'] != 0: - raise gr.Error("Unknown error") - all_task_data.append(create_res["data"]) - - # Get result - url = url_host+"/api/v1/task/status" - images = [] - while True: - if len(all_task_data) <= 0: - return images - for i in range(len(all_task_data)-1, -1, -1): - data = all_task_data[i] - r = requests.post(url, json=data, headers=headers) - if r.status_code != 200: - raise gr.Error(r.reason) - res = r.json() - if res["code"] == 6002: - # Running - continue - if res["code"] == 6005: - raise gr.Error("NSFW image detected.") - elif res["code"] == 0: - # Finished - for img_info in res["data"]["images"]: - img_res = requests.get(img_info["url"]) - images.append(Image.open(io.BytesIO(img_res.content)).convert("RGB")) - del all_task_data[i] - else: - raise gr.Error(f"Error code: {res['code']}") - time.sleep(1) - -def request_images(raw_text, class_draw, style_draw, batch_size, w, h, seed): - if filter_content(class_draw) != "国画": - if filter_content(class_draw) != "通用": - raw_text = raw_text + f",{filter_content(class_draw)}" - - for sty in style_draw: - raw_text = raw_text + f",{filter_content(sty)}" - elif filter_content(class_draw) == "国画": - raw_text = raw_text + ",国画,水墨画,大作,黑白,高清,传统" - print(f"raw text is {raw_text}") - - images = post_reqest(seed, raw_text, w, h, int(batch_size)) - - return images - - -def img2img(prompt, image_and_mask): - if image_and_mask["image"].width <= image_and_mask["image"].height: - width = 512 - height = int((width/image_and_mask["image"].width)*image_and_mask["image"].height) - else: - height = 512 - width = int((height/image_and_mask["image"].height)*image_and_mask["image"].width) - return post_reqest(0, prompt, width, height, 1, image_and_mask["image"], image_and_mask["mask"]) - - -examples = [ - '水墨蝴蝶和牡丹花,国画', - '苍劲有力的墨竹,国画', - '暴风雨中的灯塔', - '机械小松鼠,科学幻想', - '中国水墨山水画,国画', - "Lighthouse in the storm", - "A dog", - "Landscape by 张大千", - "A tiger 长了兔子耳朵", - "A baby bird 铅笔素描", 
-] - -if __name__ == "__main__": - block = gr.Blocks(css=read_content('style.css')) - - with block: - gr.HTML(read_content("header.html")) - with gr.Tabs(elem_id='tabss') as tabs: - - with gr.TabItem("文生图(Text-to-img)", id='txt2img_tab'): - - with gr.Group(): - with gr.Box(): - with gr.Row().style(mobile_collapse=False, equal_height=True): - text = gr.Textbox( - label="Prompt", - show_label=False, - max_lines=1, - placeholder="Input text(输入文字)", - interactive=True, - ).style( - border=(True, False, True, True), - rounded=(True, False, False, True), - container=False, - ) - - btn = gr.Button("Generate image").style( - margin=False, - rounded=(True, True, True, True), - ) - with gr.Row().style(mobile_collapse=False, equal_height=True): - class_draw = gr.Radio(choices=["通用(general)","国画(traditional Chinese painting)",], value="通用(general)", show_label=True, label='生成类型(type)') - # class_draw = gr.Dropdown(["通用(general)", "国画(traditional Chinese painting)", - # "照片,摄影(picture photography)", "油画(oil painting)", - # "铅笔素描(pencil sketch)", "CG", - # "水彩画(watercolor painting)", "水墨画(ink and wash)", - # "插画(illustrations)", "3D", "图生图(img2img)"], - # label="生成类型(type)", - # show_label=True, - # value="通用(general)") - with gr.Row().style(mobile_collapse=False, equal_height=True): - style_draw = gr.CheckboxGroup(["蒸汽朋克(steampunk)", "电影摄影风格(film photography)", - "概念艺术(concept art)", "Warming lighting", - "Dramatic lighting", "Natural lighting", - "虚幻引擎(unreal engine)", "4k", "8k", - "充满细节(full details)"], - label="画面风格(style)", - show_label=True, - ) - with gr.Row().style(mobile_collapse=False, equal_height=True): - # sample_size = gr.Slider(minimum=1, - # maximum=4, - # step=1, - # label="生成数量(number)", - # show_label=True, - # interactive=True, - # ) - sample_size = gr.Radio(choices=["1","2","3","4"], value="1", show_label=True, label='生成数量(number)') - seed = gr.Number(0, label='seed', interactive=True) - with gr.Row().style(mobile_collapse=False, equal_height=True): - w = gr.Slider(512,1024,value=512, step=64, label="width") - h = gr.Slider(512,1024,value=512, step=64, label="height") - - gallery = gr.Gallery( - label="Generated images", show_label=False, elem_id="gallery" - ).style(grid=[2,2]) - gr.Examples(examples=examples, fn=request_images, inputs=text, outputs=gallery, examples_per_page=100) - with gr.Row().style(mobile_collapse=False, equal_height=True): - img_choices = gr.Dropdown(["图片1(img1)"],label='请选择一张图片发送到图生图',show_label=True,value="图片1(img1)") - with gr.Row().style(mobile_collapse=False, equal_height=True): - output_txt2img_copy_to_input_btn = gr.Button("发送图片到图生图(Sent the image to img2img)").style( - margin=False, - rounded=(True, True, True, True), - ) - - with gr.Row(): - prompt = gr.Markdown("提示(Prompt):", visible=False) - with gr.Row(): - move_prompt_zh = gr.Markdown("请移至图生图部分进行编辑(拉到顶部)", visible=False) - with gr.Row(): - move_prompt_en = gr.Markdown("Please move to the img2img section for editing(Pull to the top)", visible=False) - - - - text.submit(request_images, inputs=[text, class_draw, style_draw, sample_size, w, h, seed], outputs=gallery) - btn.click(request_images, inputs=[text, class_draw, style_draw, sample_size, w, h, seed], outputs=gallery) - - sample_size.change( - fn=uifn.change_img_choices, - inputs=[sample_size], - outputs=[img_choices] - ) - - with gr.TabItem("图生图(Img-to-Img)", id="img2img_tab"): - with gr.Row(elem_id="prompt_row"): - img2img_prompt = gr.Textbox(label="Prompt", - elem_id='img2img_prompt_input', - placeholder="神奇的森林,流淌的河流.", - lines=1, - 
max_lines=1, - value="", - show_label=False).style() - - img2img_btn_mask = gr.Button("Generate", variant="primary", visible=False, - elem_id="img2img_mask_btn") - img2img_btn_editor = gr.Button("Generate", variant="primary", elem_id="img2img_edit_btn") - gr.Markdown('#### 输入图像') - with gr.Row().style(equal_height=False): - #with gr.Column(): - img2img_image_mask = gr.Image( - value=None, - source="upload", - interactive=True, - tool="sketch", - type='pil', - elem_id="img2img_mask", - image_mode="RGBA" - ) - gr.Markdown('#### 编辑后的图片') - with gr.Row(): - output_img2img_gallery = gr.Gallery(label="Images", elem_id="img2img_gallery_output").style( - grid=[4,4,4] ) - with gr.Row(): - gr.Markdown('提示(prompt):') - with gr.Row(): - gr.Markdown('请选择一张图像掩盖掉一部分区域,并输入文本描述') - with gr.Row(): - gr.Markdown('Please select an image to cover up a part of the area and enter a text description.') - gr.Markdown('# 编辑设置',visible=False) - - - output_txt2img_copy_to_input_btn.click( - uifn.copy_img_to_input, - [gallery, img_choices], - [tabs, img2img_image_mask, move_prompt_zh, move_prompt_en, prompt] - ) - - - img2img_func = img2img - img2img_inputs = [img2img_prompt, img2img_image_mask] - img2img_outputs = [output_img2img_gallery] - - img2img_btn_mask.click( - img2img_func, - img2img_inputs, - img2img_outputs - ) - - def img2img_submit_params(): - return (img2img_func, - img2img_inputs, - img2img_outputs) - - img2img_btn_editor.click(*img2img_submit_params()) - - # GENERATE ON ENTER - img2img_prompt.submit(None, None, None, - _js=call_JS("clickFirstVisibleButton", - rowId="prompt_row")) - - gr.HTML(read_content("footer.html")) - # gr.Image('./contributors.png') - - block.queue(max_size=512, concurrency_count=256).launch() diff --git a/spaces/Bart92/RVC_HF/utils/dependency.py b/spaces/Bart92/RVC_HF/utils/dependency.py deleted file mode 100644 index b70338b02d31b1ef455fbac817d418d328db518d..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/utils/dependency.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -import csv -import shutil -import tarfile -import subprocess -from pathlib import Path -from datetime import datetime - -def install_packages_but_jank_af(): - packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2'] - pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0', - 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5', - 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12', - 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1', - 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av'] - - print("Updating and installing system packages...") - for package in packages: - print(f"Installing {package}...") - subprocess.check_call(['apt-get', 'install', '-qq', '-y', package]) - - print("Updating and installing pip packages...") - subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages) - - print('Packages up to date.') - - -def setup_environment(ForceUpdateDependencies, ForceTemporaryStorage): - # Mounting Google Drive - if not ForceTemporaryStorage: - from google.colab import drive - - if not os.path.exists('/content/drive'): - drive.mount('/content/drive') - else: - print('Drive is already mounted. 
Proceeding...') - - # Function to install dependencies with progress - def install_packages(): - packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2'] - pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0', - 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5', - 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12', - 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1', - 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av'] - - print("Updating and installing system packages...") - for package in packages: - print(f"Installing {package}...") - subprocess.check_call(['apt-get', 'install', '-qq', '-y', package]) - - print("Updating and installing pip packages...") - subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages) - - - print('Packages up to date.') - - # Function to scan a directory and writes filenames and timestamps - def scan_and_write(base_path, output_file): - with open(output_file, 'w', newline='') as f: - writer = csv.writer(f) - for dirpath, dirs, files in os.walk(base_path): - for filename in files: - fname = os.path.join(dirpath, filename) - try: - mtime = os.path.getmtime(fname) - writer.writerow([fname, mtime]) - except Exception as e: - print(f'Skipping irrelevant nonexistent file {fname}: {str(e)}') - print(f'Finished recording filesystem timestamps to {output_file}.') - - # Function to compare files - def compare_files(old_file, new_file): - old_files = {} - new_files = {} - - with open(old_file, 'r') as f: - reader = csv.reader(f) - old_files = {rows[0]:rows[1] for rows in reader} - - with open(new_file, 'r') as f: - reader = csv.reader(f) - new_files = {rows[0]:rows[1] for rows in reader} - - removed_files = old_files.keys() - new_files.keys() - added_files = new_files.keys() - old_files.keys() - unchanged_files = old_files.keys() & new_files.keys() - - changed_files = {f for f in unchanged_files if old_files[f] != new_files[f]} - - for file in removed_files: - print(f'File has been removed: {file}') - - for file in changed_files: - print(f'File has been updated: {file}') - - return list(added_files) + list(changed_files) - - # Check if CachedRVC.tar.gz exists - if ForceTemporaryStorage: - file_path = '/content/CachedRVC.tar.gz' - else: - file_path = '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz' - - content_file_path = '/content/CachedRVC.tar.gz' - extract_path = '/' - - if not os.path.exists(file_path): - folder_path = os.path.dirname(file_path) - os.makedirs(folder_path, exist_ok=True) - print('No cached dependency install found. Attempting to download GitHub backup..') - - try: - download_url = "https://github.com/kalomaze/QuickMangioFixes/releases/download/release3/CachedRVC.tar.gz" - subprocess.run(["wget", "-O", file_path, download_url]) - print('Download completed successfully!') - except Exception as e: - print('Download failed:', str(e)) - - # Delete the failed download file - if os.path.exists(file_path): - os.remove(file_path) - print('Failed download file deleted. Continuing manual backup..') - - if Path(file_path).exists(): - if ForceTemporaryStorage: - print('Finished downloading CachedRVC.tar.gz.') - else: - print('CachedRVC.tar.gz found on Google Drive. 
Proceeding to copy and extract...') - - # Check if ForceTemporaryStorage is True and skip copying if it is - if ForceTemporaryStorage: - pass - else: - shutil.copy(file_path, content_file_path) - - print('Beginning backup copy operation...') - - with tarfile.open(content_file_path, 'r:gz') as tar: - for member in tar.getmembers(): - target_path = os.path.join(extract_path, member.name) - try: - tar.extract(member, extract_path) - except Exception as e: - print('Failed to extract a file (this isn\'t normal)... forcing an update to compensate') - ForceUpdateDependencies = True - print(f'Extraction of {content_file_path} to {extract_path} completed.') - - if ForceUpdateDependencies: - install_packages() - ForceUpdateDependencies = False - else: - print('CachedRVC.tar.gz not found. Proceeding to create an index of all current files...') - scan_and_write('/usr/', '/content/usr_files.csv') - - install_packages() - - scan_and_write('/usr/', '/content/usr_files_new.csv') - changed_files = compare_files('/content/usr_files.csv', '/content/usr_files_new.csv') - - with tarfile.open('/content/CachedRVC.tar.gz', 'w:gz') as new_tar: - for file in changed_files: - new_tar.add(file) - print(f'Added to tar: {file}') - - os.makedirs('/content/drive/MyDrive/RVC_Cached', exist_ok=True) - shutil.copy('/content/CachedRVC.tar.gz', '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz') - print('Updated CachedRVC.tar.gz copied to Google Drive.') - print('Dependencies fully up to date; future runs should be faster.') - diff --git a/spaces/BartPoint/VoiceChange_Beta/infer_pack/modules/F0Predictor/DioF0Predictor.py b/spaces/BartPoint/VoiceChange_Beta/infer_pack/modules/F0Predictor/DioF0Predictor.py deleted file mode 100644 index eb60d8830714338448be009d1075e3594337db15..0000000000000000000000000000000000000000 --- a/spaces/BartPoint/VoiceChange_Beta/infer_pack/modules/F0Predictor/DioF0Predictor.py +++ /dev/null @@ -1,90 +0,0 @@ -from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class DioF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, 
- f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/Benson/text-generation/Examples/Casa De Diseo Fijar Y Flip Mod Apk Pc.md b/spaces/Benson/text-generation/Examples/Casa De Diseo Fijar Y Flip Mod Apk Pc.md deleted file mode 100644 index 9639e129daaf0958f77a28855f2e71f04e6bb2a3..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Casa De Diseo Fijar Y Flip Mod Apk Pc.md +++ /dev/null @@ -1,101 +0,0 @@ - -
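The `DioF0Predictor` deleted above is a thin wrapper around pyworld's DIO + StoneMask pipeline, with the frame period derived from the hop length (1000 * hop_length / sampling_rate milliseconds). The sketch below shows that pipeline in isolation; it assumes `pyworld` and `numpy` are installed and uses a synthetic sine in place of real speech or singing audio.

```python
# Standalone sketch of the DIO -> StoneMask F0 pipeline wrapped by DioF0Predictor.
import numpy as np
import pyworld

sr = 44100          # sampling rate used by the predictor
hop_length = 512    # analysis hop in samples

# One second of a 220 Hz sine as a stand-in for real audio.
t = np.arange(sr) / sr
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)

# DIO expects float64 audio and a frame period in milliseconds:
# 1000 * hop_length / sr ~= 11.6 ms per frame here.
f0, time_axis = pyworld.dio(
    wav.astype(np.double),
    fs=sr,
    f0_floor=50.0,
    f0_ceil=1100.0,
    frame_period=1000 * hop_length / sr,
)

# StoneMask refines the coarse DIO estimate against the waveform.
f0 = pyworld.stonemask(wav.astype(np.double), f0, time_axis, sr)

voiced = f0[f0 > 0]
print(f0.shape, float(np.median(voiced)) if voiced.size else 0.0)  # median should land near 220 Hz
```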

Casa Diseñador Fix y Flip Mod APK PC: Cómo jugar a este divertido juego de simulación en su ordenador

-

¿Te encanta el diseño de interiores y mejoras para el hogar? ¿Te gusta jugar juegos de simulación donde puedes dar rienda suelta a tu creatividad e imaginación? Si respondiste sí a ambas preguntas, entonces es posible que desees echar un vistazo a House Designer Fix and Flip, un juego donde puedes comprar, renovar y vender casas. Y la mejor parte es, usted puede jugar a este juego en su PC con un archivo APK mod. En este artículo, le diremos todo lo que necesita saber sobre House Designer Fix y Flip mod APK PC, incluyendo lo que es, cómo descargar e instalar, y algunos consejos y trucos para jugarlo.

-

¿Qué es House Designer Fix and Flip?

-

Un juego de simulación donde puedes renovar y decorar casas

-

House Designer Fix and Flip es un juego de simulación desarrollado por Karate Goose Studio. Está disponible para dispositivos Android, pero también se puede jugar en su PC con un emulador. En este juego, puedes comprar casas viejas y rotas, arreglarlas y venderlas para obtener ganancias. También puede diseñar su propia casa de acuerdo a su gusto y estilo. Puede elegir entre una variedad de muebles, electrodomésticos, papeles pintados, pinturas, pisos, ventanas, puertas y más. También puede trabajar en el exterior de la casa, como el jardín, el patio, la valla y el techo. Incluso puedes demoler algunas paredes o construir otras nuevas si quieres.

-

casa de diseño fijar y flip mod apk pc


Download Zip ✔✔✔ https://bltlly.com/2v6MqR



-

Características y jugabilidad de House Designer Fix y Flip

-

House Designer Fix and Flip tiene muchas características que lo hacen divertido y realista. Algunas de ellas son:

- -

La jugabilidad de House Designer Fix and Flip es simple e intuitiva. Solo tienes que tocar la pantalla para seleccionar o usar un elemento o herramienta. También puede arrastrar o rotar elementos para colocarlos donde desee. Puede acercar o alejar los detalles de la casa. También puede cambiar entre diferentes vistas, como primera persona o tercera persona.

-

¿Por qué jugar House Designer Fix y Flip en el PC?

-

Beneficios de jugar en una pantalla más grande con mejores gráficos y rendimiento

-

Si bien House Designer Fix and Flip es un gran juego para dispositivos móviles, puede ser aún mejor si lo juegas en tu PC. Estos son algunos de los beneficios de jugar House Designer Fix y Flip en PC:

- -

Cómo descargar e instalar House Designer Fix and Flip en el PC utilizando un emulador

-

Para jugar House Designer Fix and Flip en PC, es necesario utilizar un emulador. Un emulador es un software que le permite ejecutar aplicaciones Android en su PC. Hay muchos emuladores disponibles en línea, pero recomendamos usar BlueStacks, ya que es uno de los más populares y confiables. Estos son los pasos para descargar e instalar House Designer Fix and Flip en PC usando BlueStacks:

-
    - -
  1. Inicie BlueStacks e inicie sesión con su cuenta de Google.
  2. -
  3. Ir a la tienda de Google Play y buscar Casa Diseñador Fix y Flip.
  4. -
  5. Descargar e instalar House Designer Fijar y voltear en BlueStacks.
  6. -
  7. Alternativamente, también puede descargar el archivo House Designer Fix y Flip mod APK de una fuente de confianza, como https://apkpure.com/, y arrastrarlo y soltarlo en BlueStacks para instalarlo.
  8. -
  9. Una vez que la instalación se haya completado, puede iniciar House Designer Fix y Flip en BlueStacks y comenzar a jugar.
  10. -
-

Consejos y trucos para jugar House Designer Fix and Flip en PC

-

Cómo usar las herramientas y elementos en el juego

-

Para utilizar las herramientas y los elementos en el juego, es necesario tocar los iconos en la parte inferior de la pantalla. También puede usar los atajos de teclado para acceder más rápido. Estos son algunos de los atajos de teclado que puede usar:

- - -Icono -Herramienta/artículo -Atajo de teclado - - -Hammer -Martillo -H - - -Taladro -Taladro -D - - -Saw -Sierra -S - - -Llave -Llave -W - - -Destornillador -Destornillador -E - - -Rodillo de pintura -Rodillo de pintura -P - - -Muebles -Muebles -F -

Conclusión

-

Resumen de los puntos principales

- -

Preguntas frecuentes

-

Aquí están algunas de las preguntas más frecuentes sobre House Designer Fix y Flip mod APK PC:

-
    -
  1. Q: ¿Es seguro descargar e instalar House Designer Fix and Flip mod APK PC?
  2. -
  3. A: Sí, siempre y cuando descargue el archivo APK mod de una fuente de confianza y utilice un emulador confiable, como BlueStacks. Sin embargo, siempre debe tener cuidado al descargar cualquier archivo de Internet y escanearlo en busca de virus o malware antes de instalarlo.
  4. -
  5. Q: ¿Cuáles son las ventajas de usar un archivo APK mod para House Designer Fix and Flip?
  6. -
  7. A: Un archivo APK mod es una versión modificada del archivo APK original que puede tener algunas características o beneficios adicionales, como dinero ilimitado, elementos desbloqueados o sin anuncios. Sin embargo, no todos los archivos APK mod son los mismos, por lo que siempre debe comprobar la descripción y las revisiones del archivo APK mod antes de descargarlo.
  8. -
  9. Q: ¿Cómo puedo actualizar House Designer Fix and Flip mod APK PC?
  10. -
  11. A: Para actualizar House Designer Fix y Flip mod APK PC, es necesario descargar e instalar la última versión del archivo APK mod de la misma fuente que lo descargó de. También es posible que tenga que actualizar el emulador para garantizar la compatibilidad. Sin embargo, siempre debe realizar una copia de seguridad de sus datos antes de actualizar nada, ya que algunas actualizaciones pueden causar errores o problemas técnicos.
  12. -
  13. Q: ¿Cómo puedo contactar al desarrollador de House Designer Fix and Flip?
  14. -
  15. A: Puede ponerse en contacto con el desarrollador de House Designer Fix and Flip enviando un correo electrónico a karategoosestudio@gmail.com o visitando su página de Facebook en https:/www.facebook.com/karaoosestudio/.
  16. -
  17. Q: ¿Dónde puedo encontrar más información o consejos sobre House Designer Fix and Flip?
  18. - -

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Chicos Tropiezo Apk Obb Descargar.md b/spaces/Benson/text-generation/Examples/Chicos Tropiezo Apk Obb Descargar.md deleted file mode 100644 index 078716476320190892efa607a4b8635e6037c482..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Chicos Tropiezo Apk Obb Descargar.md +++ /dev/null @@ -1,68 +0,0 @@ -
-

Tropezar Chicos APK OBB Descargar: Cómo jugar el último partido Knockout Game en su dispositivo Android

-

¿Te encanta jugar juegos de fiesta con tus amigos en línea? ¿Te gustan los desafíos caóticos e hilarantes que ponen a prueba tus habilidades y suerte? Si respondiste que sí, entonces definitivamente deberías probar Stumble Guys, el juego definitivo para dispositivos Android.

-

¿Qué es Stumble Guys?

-

Una breve introducción al juego y sus características

-

Stumble Guys es un juego de fiesta multijugador desarrollado por Kitka Games. Está inspirado en programas de televisión populares como Wipeout y Takeshi’s Castle, donde los concursantes tienen que superar varios obstáculos y trampas para llegar a la línea de meta. El juego cuenta con hasta 32 jugadores en línea, que tienen que luchar a través de los niveles ronda tras ronda de escalada del caos, hasta que un vencedor permanece.

-

chicos tropiezo apk obb descargar


DOWNLOAD >> https://bltlly.com/2v6M6L



-

El juego tiene muchos modos diferentes, mapas y trajes para elegir. Puedes jugar solo o con tus amigos en partidas personalizadas. También puedes desbloquear nuevos trajes y accesorios para tu personaje, como sombreros, gafas, máscaras y más. El juego se actualiza constantemente con nuevos contenidos y mejoras.

-

¿Por qué es tan popular y divertido?

-

Stumble Guys es uno de los juegos más populares y divertidos de Google Play Store, con más de 10 millones de descargas y una calificación de 4.4 estrellas. El juego es amado por muchos jugadores por su juego simple pero adictivo, gráficos coloridos y física hilarante. El juego es fácil de recoger y jugar, pero difícil de dominar. Nunca se sabe lo que sucederá a continuación, ya que cada nivel está lleno de sorpresas y aleatoriedad. Te reirás, gritarás, rabiarás y celebrarás mientras tropiezas hacia la victoria o la derrota.

-

¿Cómo descargar e instalar Stumble Guys APK OBB en su dispositivo Android?

-

¿Qué son los archivos APK y OBB y por qué los necesita?

- -

Stumble Guys es uno de esos juegos que necesitan tanto archivos APK como OBB para funcionar. El archivo APK contiene la información básica y el código del juego, mientras que el archivo OBB contiene los datos adicionales como gráficos, sonidos, mapas, etc. Necesitas ambos archivos para disfrutar de todas las características del juego.

-

¿Dónde encontrar la versión más reciente y oficial de Stumble Guys APK OBB?

-

La forma más fácil de encontrar la versión más reciente y oficial de Stumble Guys APK OBB es descargarlo desde la Google Play Store. Sin embargo, algunos dispositivos pueden no ser compatibles con el juego o pueden tener espacio de almacenamiento limitado. En ese caso, puede descargar los archivos de una fuente de terceros de confianza como [StumbleGuys.net]( 1 ), que es el único sitio donde se puede descargar Stumble Guys APK OBB gratis.

Cómo instalar Stumble Guys APK OBB paso a paso? -

Instalar Stumble Guys APK OBB no es difícil, pero es necesario seguir algunos pasos con cuidado. Aquí hay una guía sobre cómo hacerlo:

-

Paso 1: Habilitar fuentes desconocidas en su dispositivo

-

Antes de que pueda instalar cualquier archivo APK en su dispositivo, debe habilitar la opción para permitir fuentes desconocidas. Esto le permitirá instalar aplicaciones de fuentes distintas de Google Play Store. Para hacer esto, vaya a la configuración del dispositivo, luego la seguridad, luego cambie la opción de fuentes desconocidas. Puede ver un mensaje de advertencia, pero no se preocupe, es seguro siempre y cuando descargue los archivos de una fuente confiable.

-

Paso 2: Descargar Stumble Guys APK OBB de una fuente de confianza

-

Siguiente, es necesario descargar los archivos OBB Stumble Guys APK de una fuente de confianza. Como se mencionó anteriormente, el único sitio donde se puede descargar Stumble Guys APK OBB de forma gratuita es [StumbleGuys.net]. Ve al sitio y haz clic en el botón de descarga. Verás dos archivos: uno con la extensión APK y otro con la extensión ZIP. Descarga ambos archivos y guárdalos en una carpeta en tu dispositivo.

-

Paso 3: Localizar y extraer el archivo OBB

- -

Paso 4: Instalar el archivo APK y lanzar el juego

-

El paso final es instalar el archivo APK y lanzar el juego. Para hacer esto, vuelva a la aplicación de administrador de archivos y toque en el archivo APK. Verá un mensaje pidiéndole que instale la aplicación. Toque en instalar y espere a que termine. Una vez hecho, puede iniciar el juego tocando en su icono en la pantalla de inicio o cajón de aplicaciones. Verás una pantalla de bienvenida y luego el menú principal del juego.

-

¿Cómo se juega Stumble chicos en su dispositivo Android?

-

¿Cómo crear una cuenta y personalizar tu personaje?

-

Para jugar Stumble Guys online, necesitas crear una cuenta y personalizar tu personaje. Para hacer esto, toque en el botón de reproducción en el menú principal y luego toque en el icono de perfil en la esquina superior izquierda. Verás una pantalla donde puedes introducir tu nombre de usuario, elegir tu región y personalizar tu personaje. Puedes cambiar el color de piel, el estilo de cabello, el atuendo y los accesorios de tu personaje tocando los iconos de abajo. También puedes desbloquear nuevos objetos jugando más partidos o comprándolos con monedas.

-

-

¿Cómo unirse a un partido y competir con otros jugadores?

-

Para unirse a un partido y competir con otros jugadores, toque en el botón de reproducción en el menú principal y luego elija uno de los modos: solo o personalizado. En el modo individual, te unirás a un partido aleatorio con hasta 31 jugadores en línea. En el modo personalizado, puede crear o unirse a una partida privada con sus amigos u otros jugadores utilizando un código. Una vez que se une a un partido, verá un temporizador de cuenta atrás y luego el juego comenzará.

-

¿Cómo usar potenciadores y evitar obstáculos?

- -

Durante cada nivel, verás varios power-ups que pueden darte una ventaja o desventaja. Algunos de ellos son el aumento de velocidad, el rayo de contracción, el rayo de congelación, cáscara de plátano, etc. Para usarlos, solo pásalos y se activarán automáticamente. Sin embargo, ten cuidado, ya que algunos de ellos también pueden afectarte a ti o a otros jugadores cercanos.

-

El objetivo de cada nivel es llegar a la línea de meta antes de que otros jugadores o antes de que acabe el tiempo. Solo un cierto número de jugadores puede calificar para cada nivel, así que sé rápido e inteligente. Si no calificas o te caes del mapa, serás eliminado del partido.

-

ConclusiónConclusión

-

Stumble Guys es un divertido y adictivo juego de fiesta que puedes jugar en tu dispositivo Android con tus amigos u otros jugadores en línea. Es un juego de habilidad, suerte y risa, donde tienes que superar varios obstáculos y trampas para llegar a la meta. Para jugar el juego, es necesario descargar e instalar los archivos OBB Stumble Guys APK de una fuente de confianza. Luego, puedes crear una cuenta, personalizar tu personaje y unirte a una partida. También puedes usar potenciadores y evitar obstáculos para ganar ventaja sobre tus oponentes. Stumble Guys es un juego que te mantendrá entretenido durante horas y te hará sonreír.

-

Si usted está buscando un nuevo y emocionante juego para jugar en su dispositivo Android, entonces usted debe probar definitivamente Stumble Guys. Es gratis para descargar y jugar, y se actualiza constantemente con nuevos contenidos y mejoras. ¡Descarga Stumble Guys APK OBB hoy y únete al último juego de knockout de fiesta!

-

Preguntas frecuentes

-

Aquí hay algunas preguntas frecuentes sobre Stumble Guys APK OBB descargar:

- - -Pregunta -Respuesta - - -¿Es seguro descargar e instalar Stumble Guys APK OBB? - - - -¿Necesito una conexión a Internet para jugar a Stumble Guys? -Sí, necesitas una conexión a Internet para jugar a Stumble Guys online con otros jugadores. Sin embargo, también puedes jugar sin conexión en el modo de práctica, donde puedes probar diferentes niveles y potenciadores. - - -¿Cómo puedo jugar Stumble Guys con mis amigos? -Puedes jugar a Stumble Guys con tus amigos creando o uniéndote a una partida personalizada. Para hacer esto, toque en el botón de reproducción en el menú principal y luego elegir el modo personalizado. Puede crear una nueva coincidencia o introducir un código para unirse a una existente. También puede invitar a sus amigos compartiendo el código con ellos. - - -¿Cómo puedo obtener más monedas en Stumble Guys? -Puedes obtener más monedas en Stumble Guys jugando más partidos, completando misiones diarias, viendo anuncios o comprándolos con dinero real. Puedes usar monedas para desbloquear nuevos atuendos y accesorios para tu personaje. - - -¿Cómo puedo contactar a los desarrolladores de Stumble Guys? -Puede ponerse en contacto con los desarrolladores de Stumble Guys enviándoles un correo electrónico a support@kitkagames.com o siguiéndolos en sus cuentas de redes sociales como Facebook, Twitter, Instagram y YouTube. También puede dejar un comentario o retroalimentación en Google Play Store. - -

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Fuera De La Carretera Mod Apk Todos Los Coches Desbloqueados ltima Versin.md b/spaces/Benson/text-generation/Examples/Descargar Fuera De La Carretera Mod Apk Todos Los Coches Desbloqueados ltima Versin.md deleted file mode 100644 index c55e61dda56de8722551ed6b181479067f1a53dd..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Fuera De La Carretera Mod Apk Todos Los Coches Desbloqueados ltima Versin.md +++ /dev/null @@ -1,55 +0,0 @@ - -

Descargar Off The Road Mod APK Todos los coches desbloqueados última versión

-

Si eres un fan de los juegos de conducción todoterreno, te encantará Off The Road, un juego realista e inmersivo que te permite conducir varios vehículos en terrenos desafiantes. En este artículo, le mostraremos cómo descargar e instalar Off The Road mod apk, que le da monedas ilimitadas y todos los coches desbloqueados. También compartiremos algunos consejos y trucos para ayudarte a dominar el juego y divertirte más.

-

descargar fuera de la carretera mod apk todos los coches desbloqueados última versión


Download > https://bltlly.com/2v6JHv



-

¿Qué está fuera de la carretera?

-

Off The Road es un juego desarrollado por Dogbyte Games, los creadores de Zombie Offroad Safari y Blocky Roads. Está disponible para dispositivos Android e iOS, así como para Nintendo Switch. Off The Road es un juego que cuenta con 12 pistas y 18 vehículos de Ford y Land Rover. El juego tiene cinco modos de juego: carrera rápida, carrera, torneo, árcade y multijugador. El modo carrera es el modo principal del juego, en el que el jugador corre para desbloquear nuevos vehículos, pistas y tipos de carreras. El juego también presenta simulaciones realistas de vehículos y física. Otras características incluyen desafíos todoterreno, trabajos de transporte y la capacidad de jugar el juego con o sin internet.

-

Características de Off The Road

-

Algunas de las características que hacen de Off The Road un gran juego son:

- -

¿Por qué descargar Off The Road mod apk?

- -

Si desea descargar e instalar Off The Road mod apk, puede seguir estos sencillos pasos:

-

Paso 1: Habilitar fuentes desconocidas

-

Antes de que pueda instalar Off The Road mod apk, es necesario habilitar fuentes desconocidas en el dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play o App Store. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo.

-

Paso 2: Descargar el archivo apk mod

-

Siguiente, es necesario descargar el archivo apk mod de una fuente confiable. Puede utilizar este enlace para descargar la última versión de Off The Road mod apk, que es 1.6.2 a partir de junio de 2023. El tamaño del archivo es de unos 150 MB, así que asegúrate de tener suficiente espacio en tu dispositivo.

-

-

Paso 3: Instalar el archivo apk mod

-

Una vez que haya descargado el archivo apk mod, necesita instalarlo en su dispositivo. Para hacer esto, busque el archivo en su administrador de archivos y toque en él. Puede ver una ventana emergente pidiendo permiso para instalar la aplicación. Toque en Instalar y espere a que se complete la instalación.

-

Paso 4: Iniciar el juego y disfrutar de

-

Después de la instalación, puede iniciar el juego desde el cajón de la aplicación o la pantalla de inicio. Verás que tienes monedas ilimitadas y todos los coches desbloqueados en el juego. Ahora puedes disfrutar jugando Off The Road con todas sus características y contenido gratis.

-

Consejos y trucos para jugar fuera de la carretera

-

Ahora que ha descargado e instalado Off The Road mod apk, es posible que desee saber algunos consejos y trucos para ayudarle a jugar mejor el juego y divertirse más. Estos son algunos de ellos:

-

Utilice el mapa y la brújula para navegar

- -

Actualiza tus vehículos y desbloquea nuevos

-

Off The Road tiene 18 vehículos diferentes que puedes conducir en el juego. Cada vehículo tiene sus propias fortalezas y debilidades, tales como velocidad, manejo, durabilidad, capacidad de combustible, etc. Puede actualizar sus vehículos con varias piezas y accesorios, como neumáticos, motores, suspensiones, frenos, etc. Actualizar sus vehículos mejorará su rendimiento y los hará más adecuados para diferentes terrenos y desafíos. También puedes desbloquear vehículos nuevos completando misiones o comprándolos con monedas. Algunos vehículos son exclusivos para la versión premium del juego, pero se puede acceder a ellos con Off The Road mod apk. Puedes cambiar entre diferentes vehículos yendo a tu garaje o encontrándolos en el mundo.

-

Completa desafíos y misiones para ganar monedas y XP

-

Off The Road tiene varios desafíos y misiones que puedes completar para ganar monedas y XP. Las monedas se utilizan para comprar o actualizar vehículos, mientras que XP se utilizan para subir de nivel y desbloquear nuevas características. Los desafíos son tareas cortas que ponen a prueba tus habilidades en la conducción, como la deriva, saltar, aplastar, etc. Las misiones son tareas más largas que implican carreras, entrega, rescate, destrucción, etc. Puedes encontrar desafíos y misiones en el mapa o en la brújula. También puedes aceptarlos de los NPCs que conoces en el mundo. Completar desafíos y misiones te recompensará con monedas y XP según tu rendimiento.

-

Explora el mundo abierto y descubre secretos ocultos

- -

Utilice el helicóptero para volar sobre los obstáculos y llegar a nuevas áreas

-

Off The Road tiene un helicóptero que puede utilizar para volar sobre el mundo. El helicóptero es uno de los vehículos más versátiles del juego, ya que puede ir a cualquier parte y hacer cualquier cosa. Puede utilizar el helicóptero para volar sobre los obstáculos y llegar a nuevas áreas que de otra manera son inaccesibles por tierra o agua. También puede utilizar el helicóptero para realizar acrobacias, tales como rollos de barril, bucles, inmersiones, etc. El helicóptero tiene un cabrestante que se puede utilizar para conectar o separar objetos o vehículos. Puede utilizar el cabrestante para levantar o soltar objetos o vehículos en diferentes lugares. También puede utilizar el cabrestante para remolcar o rescatar otros vehículos que están atascados o dañados.

-

Conclusión

-

Off The Road es un juego que ofrece una experiencia de conducción todoterreno realista e inmersiva. Tiene 18 vehículos diferentes que se pueden conducir en 12 pistas con diversos terrenos y efectos climáticos. Tiene cinco modos de juego que puedes jugar online o offline. Tiene gráficos realistas y efectos de sonido que hacen que el juego sea más agradable. Si desea descargar e instalar Off The Road mod apk, puede seguir los pasos que hemos proporcionado en este artículo. Off The Road mod apk le da monedas ilimitadas y todos los coches desbloqueados de forma gratuita. De esta manera, puedes acceder a todas las características y contenidos del juego sin gastar dinero ni esperar largas horas. Esperamos que haya encontrado este artículo útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer y jugar feliz!

-

Preguntas frecuentes

-

Aquí hay algunas preguntas frecuentes acerca de Off The Road mod apk:

-

Q: Está fuera de la carretera mod apk seguro para descargar e instalar?

- -

Q: ¿Cuáles son los beneficios de Off The Road mod apk?

-

A: Off The Road mod apk le da monedas ilimitadas y todos los coches desbloqueados de forma gratuita. Esto significa que puedes disfrutar de todas las características y contenidos del juego sin gastar dinero ni esperar largas horas. También puede acceder a los vehículos premium que no están disponibles en la versión gratuita.

-

Q: ¿Cómo puedo actualizar Off The Road mod apk?

-

A: Para actualizar Off The Road mod apk, es necesario descargar e instalar la última versión del archivo apk mod de una fuente confiable. Puede utilizar este enlace para descargar la última versión de Off The Road mod apk, que es 1.6.2 a partir de junio de 2023. No es necesario desinstalar la versión anterior del apk mod antes de instalar el nuevo.

-

Q: ¿Cómo puedo desinstalar Off The Road mod apk?

-

A: Para desinstalar Off The Road mod apk, es necesario ir a Configuración > Aplicaciones > Off The Road > Desinstalar y toque en OK. También puedes desinstalar el juego presionando su icono en la pantalla de inicio o en el cajón de la aplicación y arrastrándolo a la opción Desinstalar.

-

Q: ¿Puedo jugar Off The Road mod apk con mis amigos?

-

A: Sí, puedes jugar Off The Road mod apk con tus amigos en línea o fuera de línea. Puedes unirte o crear salas en línea con hasta siete jugadores más en el modo multijugador. También puedes jugar con tus amigos en Wi-Fi local o Bluetooth en el modo árcade.

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/__init__.py deleted file mode 100644 index 9f73ca7105ff0bf11d74dd16ffb0653059466f70..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/__init__.py +++ /dev/null @@ -1,127 +0,0 @@ -import contextlib -import functools -import os -import sys -from typing import TYPE_CHECKING, List, Optional, Type, cast - -from pip._internal.utils.misc import strtobool - -from .base import BaseDistribution, BaseEnvironment, FilesystemWheel, MemoryWheel, Wheel - -if TYPE_CHECKING: - from typing import Protocol -else: - Protocol = object - -__all__ = [ - "BaseDistribution", - "BaseEnvironment", - "FilesystemWheel", - "MemoryWheel", - "Wheel", - "get_default_environment", - "get_environment", - "get_wheel_distribution", - "select_backend", -] - - -def _should_use_importlib_metadata() -> bool: - """Whether to use the ``importlib.metadata`` or ``pkg_resources`` backend. - - By default, pip uses ``importlib.metadata`` on Python 3.11+, and - ``pkg_resourcess`` otherwise. This can be overridden by a couple of ways: - - * If environment variable ``_PIP_USE_IMPORTLIB_METADATA`` is set, it - dictates whether ``importlib.metadata`` is used, regardless of Python - version. - * On Python 3.11+, Python distributors can patch ``importlib.metadata`` - to add a global constant ``_PIP_USE_IMPORTLIB_METADATA = False``. This - makes pip use ``pkg_resources`` (unless the user set the aforementioned - environment variable to *True*). - """ - with contextlib.suppress(KeyError, ValueError): - return bool(strtobool(os.environ["_PIP_USE_IMPORTLIB_METADATA"])) - if sys.version_info < (3, 11): - return False - import importlib.metadata - - return bool(getattr(importlib.metadata, "_PIP_USE_IMPORTLIB_METADATA", True)) - - -class Backend(Protocol): - Distribution: Type[BaseDistribution] - Environment: Type[BaseEnvironment] - - -@functools.lru_cache(maxsize=None) -def select_backend() -> Backend: - if _should_use_importlib_metadata(): - from . import importlib - - return cast(Backend, importlib) - from . import pkg_resources - - return cast(Backend, pkg_resources) - - -def get_default_environment() -> BaseEnvironment: - """Get the default representation for the current environment. - - This returns an Environment instance from the chosen backend. The default - Environment instance should be built from ``sys.path`` and may use caching - to share instance state accorss calls. - """ - return select_backend().Environment.default() - - -def get_environment(paths: Optional[List[str]]) -> BaseEnvironment: - """Get a representation of the environment specified by ``paths``. - - This returns an Environment instance from the chosen backend based on the - given import paths. The backend must build a fresh instance representing - the state of installed distributions when this function is called. - """ - return select_backend().Environment.from_paths(paths) - - -def get_directory_distribution(directory: str) -> BaseDistribution: - """Get the distribution metadata representation in the specified directory. - - This returns a Distribution instance from the chosen backend based on - the given on-disk ``.dist-info`` directory. 
- """ - return select_backend().Distribution.from_directory(directory) - - -def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution: - """Get the representation of the specified wheel's distribution metadata. - - This returns a Distribution instance from the chosen backend based on - the given wheel's ``.dist-info`` directory. - - :param canonical_name: Normalized project name of the given wheel. - """ - return select_backend().Distribution.from_wheel(wheel, canonical_name) - - -def get_metadata_distribution( - metadata_contents: bytes, - filename: str, - canonical_name: str, -) -> BaseDistribution: - """Get the dist representation of the specified METADATA file contents. - - This returns a Distribution instance from the chosen backend sourced from the data - in `metadata_contents`. - - :param metadata_contents: Contents of a METADATA file within a dist, or one served - via PEP 658. - :param filename: Filename for the dist this metadata represents. - :param canonical_name: Normalized project name of the given dist. - """ - return select_backend().Distribution.from_metadata_file_contents( - metadata_contents, - filename, - canonical_name, - ) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/network/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/network/__init__.py deleted file mode 100644 index b51bde91b2e5b4e557ed9b70fc113843cc3d49ae..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/network/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Contains purely network-related utilities. -""" diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatter.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatter.py deleted file mode 100644 index a2349ef8652c659388ba69477c01989f2e4ce17d..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatter.py +++ /dev/null @@ -1,94 +0,0 @@ -""" - pygments.formatter - ~~~~~~~~~~~~~~~~~~ - - Base formatter class. - - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import codecs - -from pip._vendor.pygments.util import get_bool_opt -from pip._vendor.pygments.styles import get_style_by_name - -__all__ = ['Formatter'] - - -def _lookup_style(style): - if isinstance(style, str): - return get_style_by_name(style) - return style - - -class Formatter: - """ - Converts a token stream to text. - - Options accepted: - - ``style`` - The style to use, can be a string or a Style subclass - (default: "default"). Not used by e.g. the - TerminalFormatter. - ``full`` - Tells the formatter to output a "full" document, i.e. - a complete self-contained document. This doesn't have - any effect for some formatters (default: false). - ``title`` - If ``full`` is true, the title that should be used to - caption the document (default: ''). - ``encoding`` - If given, must be an encoding name. This will be used to - convert the Unicode token strings to byte strings in the - output. If it is "" or None, Unicode strings will be written - to the output file, which most file-like objects do not - support (default: None). - ``outencoding`` - Overrides ``encoding`` if given. - """ - - #: Name of the formatter - name = None - - #: Shortcuts for the formatter - aliases = [] - - #: fn match rules - filenames = [] - - #: If True, this formatter outputs Unicode strings when no encoding - #: option is given. 
- unicodeoutput = True - - def __init__(self, **options): - self.style = _lookup_style(options.get('style', 'default')) - self.full = get_bool_opt(options, 'full', False) - self.title = options.get('title', '') - self.encoding = options.get('encoding', None) or None - if self.encoding in ('guess', 'chardet'): - # can happen for e.g. pygmentize -O encoding=guess - self.encoding = 'utf-8' - self.encoding = options.get('outencoding') or self.encoding - self.options = options - - def get_style_defs(self, arg=''): - """ - Return the style definitions for the current style as a string. - - ``arg`` is an additional argument whose meaning depends on the - formatter used. Note that ``arg`` can also be a list or tuple - for some formatters like the html formatter. - """ - return '' - - def format(self, tokensource, outfile): - """ - Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` - tuples and write it into ``outfile``. - """ - if self.encoding: - # wrap the outfile in a StreamWriter - outfile = codecs.lookup(self.encoding)[3](outfile) - return self.format_unencoded(tokensource, outfile) diff --git a/spaces/Branon/TurboKeys/Dockerfile b/spaces/Branon/TurboKeys/Dockerfile deleted file mode 100644 index cee9bcd0c69dbeb6e903c3f64531b2ff70f021f6..0000000000000000000000000000000000000000 --- a/spaces/Branon/TurboKeys/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitlab.com/khanon/oai-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/configs/Detectron1-Comparisons/README.md b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/configs/Detectron1-Comparisons/README.md deleted file mode 100644 index 45ab6dab9cf78824788ec73ae3638837bbfca297..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/configs/Detectron1-Comparisons/README.md +++ /dev/null @@ -1,82 +0,0 @@ - -Detectron2's default settings and a few implementation details are different from Detectron. - -The differences in implementation details are shared in -[Compatibility with Other Libraries](../../docs/notes/compatibility.md). - -The differences in default config includes: -* Use scale augmentation during training. This improves AP with lower training cost. -* Use L1 loss instead of smooth L1 loss for simplicity. This sometimes improves box AP but may - affect other AP. -* Use `POOLER_SAMPLING_RATIO=0` instead of 2. This does not significantly affect AP. -* Use `ROIAlignV2`. This does not significantly affect AP. - -In this directory, we provide a few configs that mimic Detectron's behavior as close as possible. -This provides a fair comparison of accuracy and speed against Detectron. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | kp. AP | model id | download |
-| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
-| Faster R-CNN | 1x | 0.219 | 0.038 | 3.1 | 36.9 | | | 137781054 | model \| metrics |
-| Keypoint R-CNN | 1x | 0.313 | 0.071 | 5.0 | 53.1 | | 64.2 | 137781195 | model \| metrics |
-| Mask R-CNN | 1x | 0.273 | 0.043 | 3.4 | 37.8 | 34.9 | | 137781281 | model \| metrics |
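
For reference, a minimal sketch of how one of the Detectron1-comparison baselines above could be loaded through detectron2's model zoo. This is not part of the deleted configs themselves; the exact YAML filename is an assumption and should be checked against the files shipped in this directory.

```python
# Hedged sketch: load a Detectron1-comparison baseline via detectron2's model zoo.
# The config filename below is assumed; verify it against the configs in this directory.
from detectron2 import model_zoo
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file(
    model_zoo.get_config_file("Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml")
)
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml"
)
# "noaug" keeps the Detectron1-style training settings described above
# (no scale augmentation, smooth L1 box loss).
print(cfg.INPUT.MIN_SIZE_TRAIN)
```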
- -## Comparisons: - -* Faster R-CNN: Detectron's AP is 36.7, similar to ours. -* Keypoint R-CNN: Detectron's AP is box 53.6, keypoint 64.2. Fixing a Detectron's - [bug](https://github.com/facebookresearch/Detectron/issues/459) lead to a drop in box AP, and can be - compensated back by some parameter tuning. -* Mask R-CNN: Detectron's AP is box 37.7, mask 33.9. We're 1 AP better in mask AP, due to more correct implementation. - -For speed comparison, see [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html). diff --git a/spaces/CVPR/LIVE/pybind11/tests/test_buffers.py b/spaces/CVPR/LIVE/pybind11/tests/test_buffers.py deleted file mode 100644 index d6adaf1f5eee00f93e2b0ba7e3838c1107297080..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pybind11/tests/test_buffers.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -import io -import struct - -import pytest - -import env # noqa: F401 - -from pybind11_tests import buffers as m -from pybind11_tests import ConstructorStats - -np = pytest.importorskip("numpy") - - -def test_from_python(): - with pytest.raises(RuntimeError) as excinfo: - m.Matrix(np.array([1, 2, 3])) # trying to assign a 1D array - assert str(excinfo.value) == "Incompatible buffer format!" - - m3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - m4 = m.Matrix(m3) - - for i in range(m4.rows()): - for j in range(m4.cols()): - assert m3[i, j] == m4[i, j] - - cstats = ConstructorStats.get(m.Matrix) - assert cstats.alive() == 1 - del m3, m4 - assert cstats.alive() == 0 - assert cstats.values() == ["2x3 matrix"] - assert cstats.copy_constructions == 0 - # assert cstats.move_constructions >= 0 # Don't invoke any - assert cstats.copy_assignments == 0 - assert cstats.move_assignments == 0 - - -# https://foss.heptapod.net/pypy/pypy/-/issues/2444 -def test_to_python(): - mat = m.Matrix(5, 4) - assert memoryview(mat).shape == (5, 4) - - assert mat[2, 3] == 0 - mat[2, 3] = 4.0 - mat[3, 2] = 7.0 - assert mat[2, 3] == 4 - assert mat[3, 2] == 7 - assert struct.unpack_from('f', mat, (3 * 4 + 2) * 4) == (7, ) - assert struct.unpack_from('f', mat, (2 * 4 + 3) * 4) == (4, ) - - mat2 = np.array(mat, copy=False) - assert mat2.shape == (5, 4) - assert abs(mat2).sum() == 11 - assert mat2[2, 3] == 4 and mat2[3, 2] == 7 - mat2[2, 3] = 5 - assert mat2[2, 3] == 5 - - cstats = ConstructorStats.get(m.Matrix) - assert cstats.alive() == 1 - del mat - pytest.gc_collect() - assert cstats.alive() == 1 - del mat2 # holds a mat reference - pytest.gc_collect() - assert cstats.alive() == 0 - assert cstats.values() == ["5x4 matrix"] - assert cstats.copy_constructions == 0 - # assert cstats.move_constructions >= 0 # Don't invoke any - assert cstats.copy_assignments == 0 - assert cstats.move_assignments == 0 - - -def test_inherited_protocol(): - """SquareMatrix is derived from Matrix and inherits the buffer protocol""" - - matrix = m.SquareMatrix(5) - assert memoryview(matrix).shape == (5, 5) - assert np.asarray(matrix).shape == (5, 5) - - -def test_pointer_to_member_fn(): - for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]: - buf = cls() - buf.value = 0x12345678 - value = struct.unpack('i', bytearray(buf))[0] - assert value == 0x12345678 - - -def test_readonly_buffer(): - buf = m.BufferReadOnly(0x64) - view = memoryview(buf) - assert view[0] == b'd' if env.PY2 else 0x64 - assert view.readonly - - -def test_selective_readonly_buffer(): - buf = m.BufferReadOnlySelect() - - memoryview(buf)[0] = b'd' if env.PY2 else 0x64 - assert buf.value == 0x64 - - 
io.BytesIO(b'A').readinto(buf) - assert buf.value == ord(b'A') - - buf.readonly = True - with pytest.raises(TypeError): - memoryview(buf)[0] = b'\0' if env.PY2 else 0 - with pytest.raises(TypeError): - io.BytesIO(b'1').readinto(buf) diff --git a/spaces/CVPR/LIVE/thrust/thrust/replace.h b/spaces/CVPR/LIVE/thrust/thrust/replace.h deleted file mode 100644 index 225cb060a6ee01d23eaac574f6d03ad7a964a22b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/replace.h +++ /dev/null @@ -1,823 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file replace.h - * \brief Functions for replacing elements in a range with a particular value - */ - -#pragma once - -#include -#include - -namespace thrust -{ - - -/*! \addtogroup transformations - * \addtogroup replacing - * \ingroup transformations - * \{ - */ - - -/*! \p replace replaces every element in the range [first, last) equal to \p old_value - * with \p new_value. That is: for every iterator \c i, if *i == old_value - * then it performs the assignment *i = new_value. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence of interest. - * \param last The end of the sequence of interest. - * \param old_value The value to replace. - * \param new_value The new value to replace \p old_value. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam ForwardIterator is a model of Forward Iterator, - * and \p ForwardIterator is mutable. - * \tparam T is a model of Assignable, - * \p T is a model of EqualityComparable, - * objects of \p T may be compared for equality with objects of - * \p ForwardIterator's \c value_type, - * and \p T is convertible to \p ForwardIterator's \c value_type. - * - * The following code snippet demonstrates how to use \p replace to replace - * a value of interest in a \c device_vector with another using the \p thrust::device - * execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * - * ... - * - * thrust::device_vector A(4); - * A[0] = 1; - * A[1] = 2; - * A[2] = 3; - * A[3] = 1; - * - * thrust::replace(thrust::device, A.begin(), A.end(), 1, 99); - * - * // A contains [99, 2, 3, 99] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace.html - * \see \c replace_if - * \see \c replace_copy - * \see \c replace_copy_if - */ -template -__host__ __device__ - void replace(const thrust::detail::execution_policy_base &exec, - ForwardIterator first, ForwardIterator last, - const T &old_value, - const T &new_value); - - -/*! \p replace replaces every element in the range [first, last) equal to \p old_value - * with \p new_value. That is: for every iterator \c i, if *i == old_value - * then it performs the assignment *i = new_value. - * - * \param first The beginning of the sequence of interest. 
- * \param last The end of the sequence of interest. - * \param old_value The value to replace. - * \param new_value The new value to replace \p old_value. - * - * \tparam ForwardIterator is a model of Forward Iterator, - * and \p ForwardIterator is mutable. - * \tparam T is a model of Assignable, - * \p T is a model of EqualityComparable, - * objects of \p T may be compared for equality with objects of - * \p ForwardIterator's \c value_type, - * and \p T is convertible to \p ForwardIterator's \c value_type. - * - * The following code snippet demonstrates how to use \p replace to replace - * a value of interest in a \c device_vector with another. - * - * \code - * #include - * #include - * - * ... - * - * thrust::device_vector A(4); - * A[0] = 1; - * A[1] = 2; - * A[2] = 3; - * A[3] = 1; - * - * thrust::replace(A.begin(), A.end(), 1, 99); - * - * // A contains [99, 2, 3, 99] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace.html - * \see \c replace_if - * \see \c replace_copy - * \see \c replace_copy_if - */ -template - void replace(ForwardIterator first, ForwardIterator last, const T &old_value, - const T &new_value); - - -/*! \p replace_if replaces every element in the range [first, last) for which - * \p pred returns \c true with \p new_value. That is: for every iterator \c i, if - * pred(*i) is \c true then it performs the assignment *i = new_value. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence of interest. - * \param last The end of the sequence of interest. - * \param pred The predicate to test on every value of the range [first,last). - * \param new_value The new value to replace elements which pred(*i) evaluates - * to \c true. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam ForwardIterator is a model of Forward Iterator, - * \p ForwardIterator is mutable, - * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam Predicate is a model of Predicate. - * \tparam T is a model of Assignable, - * and \p T is convertible to \p ForwardIterator's \c value_type. - * - * The following code snippet demonstrates how to use \p replace_if to replace - * a \c device_vector's negative elements with \c 0 using the \p thrust::device execution policy - * for parallelization: - * - * \code - * #include - * #include - * #include - * ... - * struct is_less_than_zero - * { - * __host__ __device__ - * bool operator()(int x) - * { - * return x < 0; - * } - * }; - * - * ... - * - * thrust::device_vector A(4); - * A[0] = 1; - * A[1] = -3; - * A[2] = 2; - * A[3] = -1; - * - * is_less_than_zero pred; - * - * thrust::replace_if(thrust::device, A.begin(), A.end(), pred, 0); - * - * // A contains [1, 0, 2, 0] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace_if.html - * \see \c replace - * \see \c replace_copy - * \see \c replace_copy_if - */ -template -__host__ __device__ - void replace_if(const thrust::detail::execution_policy_base &exec, - ForwardIterator first, ForwardIterator last, - Predicate pred, - const T &new_value); - - -/*! \p replace_if replaces every element in the range [first, last) for which - * \p pred returns \c true with \p new_value. That is: for every iterator \c i, if - * pred(*i) is \c true then it performs the assignment *i = new_value. - * - * \param first The beginning of the sequence of interest. 
- * \param last The end of the sequence of interest. - * \param pred The predicate to test on every value of the range [first,last). - * \param new_value The new value to replace elements which pred(*i) evaluates - * to \c true. - * - * \tparam ForwardIterator is a model of Forward Iterator, - * \p ForwardIterator is mutable, - * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam Predicate is a model of Predicate. - * \tparam T is a model of Assignable, - * and \p T is convertible to \p ForwardIterator's \c value_type. - * - * The following code snippet demonstrates how to use \p replace_if to replace - * a \c device_vector's negative elements with \c 0. - * - * \code - * #include - * #include - * ... - * struct is_less_than_zero - * { - * __host__ __device__ - * bool operator()(int x) - * { - * return x < 0; - * } - * }; - * - * ... - * - * thrust::device_vector A(4); - * A[0] = 1; - * A[1] = -3; - * A[2] = 2; - * A[3] = -1; - * - * is_less_than_zero pred; - * - * thrust::replace_if(A.begin(), A.end(), pred, 0); - * - * // A contains [1, 0, 2, 0] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace_if.html - * \see \c replace - * \see \c replace_copy - * \see \c replace_copy_if - */ -template - void replace_if(ForwardIterator first, ForwardIterator last, - Predicate pred, - const T &new_value); - - -/*! \p replace_if replaces every element in the range [first, last) for which - * pred(*s) returns \c true with \p new_value. That is: for every iterator - * \c i in the range [first, last), and \c s in the range [stencil, stencil + (last - first)), - * if pred(*s) is \c true then it performs the assignment *i = new_value. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence of interest. - * \param last The end of the sequence of interest. - * \param stencil The beginning of the stencil sequence. - * \param pred The predicate to test on every value of the range [first,last). - * \param new_value The new value to replace elements which pred(*i) evaluates - * to \c true. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam ForwardIterator is a model of Forward Iterator, - * and \p ForwardIterator is mutable. - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam Predicate is a model of Predicate. - * \tparam T is a model of Assignable, - * and \p T is convertible to \p ForwardIterator's \c value_type. - * - * The following code snippet demonstrates how to use \p replace_if to replace - * a \c device_vector's element with \c 0 when its corresponding stencil element is less than zero - * using the \p thrust::device execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * - * struct is_less_than_zero - * { - * __host__ __device__ - * bool operator()(int x) - * { - * return x < 0; - * } - * }; - * - * ... 
- * - * thrust::device_vector A(4); - * A[0] = 10; - * A[1] = 20; - * A[2] = 30; - * A[3] = 40; - * - * thrust::device_vector S(4); - * S[0] = -1; - * S[1] = 0; - * S[2] = -1; - * S[3] = 0; - * - * is_less_than_zero pred; - * thrust::replace_if(thrust::device, A.begin(), A.end(), S.begin(), pred, 0); - * - * // A contains [0, 20, 0, 40] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace_if.html - * \see \c replace - * \see \c replace_copy - * \see \c replace_copy_if - */ -template -__host__ __device__ - void replace_if(const thrust::detail::execution_policy_base &exec, - ForwardIterator first, ForwardIterator last, - InputIterator stencil, - Predicate pred, - const T &new_value); - - -/*! \p replace_if replaces every element in the range [first, last) for which - * pred(*s) returns \c true with \p new_value. That is: for every iterator - * \c i in the range [first, last), and \c s in the range [stencil, stencil + (last - first)), - * if pred(*s) is \c true then it performs the assignment *i = new_value. - * - * \param first The beginning of the sequence of interest. - * \param last The end of the sequence of interest. - * \param stencil The beginning of the stencil sequence. - * \param pred The predicate to test on every value of the range [first,last). - * \param new_value The new value to replace elements which pred(*i) evaluates - * to \c true. - * - * \tparam ForwardIterator is a model of Forward Iterator, - * and \p ForwardIterator is mutable. - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam Predicate is a model of Predicate. - * \tparam T is a model of Assignable, - * and \p T is convertible to \p ForwardIterator's \c value_type. - * - * The following code snippet demonstrates how to use \p replace_if to replace - * a \c device_vector's element with \c 0 when its corresponding stencil element is less than zero. - * - * \code - * #include - * #include - * - * struct is_less_than_zero - * { - * __host__ __device__ - * bool operator()(int x) - * { - * return x < 0; - * } - * }; - * - * ... - * - * thrust::device_vector A(4); - * A[0] = 10; - * A[1] = 20; - * A[2] = 30; - * A[3] = 40; - * - * thrust::device_vector S(4); - * S[0] = -1; - * S[1] = 0; - * S[2] = -1; - * S[3] = 0; - * - * is_less_than_zero pred; - * thrust::replace_if(A.begin(), A.end(), S.begin(), pred, 0); - * - * // A contains [0, 20, 0, 40] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace_if.html - * \see \c replace - * \see \c replace_copy - * \see \c replace_copy_if - */ -template - void replace_if(ForwardIterator first, ForwardIterator last, - InputIterator stencil, - Predicate pred, - const T &new_value); - - -/*! \p replace_copy copies elements from the range [first, last) to the range - * [result, result + (last-first)), except that any element equal to \p old_value - * is not copied; \p new_value is copied instead. - * - * More precisely, for every integer \c n such that 0 <= n < last-first, \p replace_copy - * performs the assignment *(result+n) = new_value if *(first+n) == old_value, - * and *(result+n) = *(first+n) otherwise. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence to copy from. - * \param last The end of the sequence to copy from. - * \param result The beginning of the sequence to copy to. 
- * \param old_value The value to replace. - * \param new_value The replacement value for which *i == old_value evaluates to \c true. - * \return result + (last-first) - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam T is a model of Assignable, - * \p T is a model of Equality Comparable, - * \p T may be compared for equality with \p InputIterator's \c value_type, - * and \p T is convertible to \p OutputIterator's \c value_type. - * - * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. - * - * \code - * #include - * #include - * #include - * ... - * thrust::device_vector A(4); - * A[0] = 1; - * A[1] = 2; - * A[2] = 3; - * A[3] = 1; - * - * thrust::device_vector B(4); - * - * thrust::replace_copy(thrust::device, A.begin(), A.end(), B.begin(), 1, 99); - * - * // B contains [99, 2, 3, 99] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace_copy.html - * \see \c copy - * \see \c replace - * \see \c replace_if - * \see \c replace_copy_if - */ -template -__host__ __device__ - OutputIterator replace_copy(const thrust::detail::execution_policy_base &exec, - InputIterator first, InputIterator last, - OutputIterator result, - const T &old_value, - const T &new_value); - - -/*! \p replace_copy copies elements from the range [first, last) to the range - * [result, result + (last-first)), except that any element equal to \p old_value - * is not copied; \p new_value is copied instead. - * - * More precisely, for every integer \c n such that 0 <= n < last-first, \p replace_copy - * performs the assignment *(result+n) = new_value if *(first+n) == old_value, - * and *(result+n) = *(first+n) otherwise. - * - * \param first The beginning of the sequence to copy from. - * \param last The end of the sequence to copy from. - * \param result The beginning of the sequence to copy to. - * \param old_value The value to replace. - * \param new_value The replacement value for which *i == old_value evaluates to \c true. - * \return result + (last-first) - * - * \tparam InputIterator is a model of Input Iterator. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam T is a model of Assignable, - * \p T is a model of Equality Comparable, - * \p T may be compared for equality with \p InputIterator's \c value_type, - * and \p T is convertible to \p OutputIterator's \c value_type. - * - * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. - * - * \code - * #include - * #include - * ... - * thrust::device_vector A(4); - * A[0] = 1; - * A[1] = 2; - * A[2] = 3; - * A[3] = 1; - * - * thrust::device_vector B(4); - * - * thrust::replace_copy(A.begin(), A.end(), B.begin(), 1, 99); - * - * // B contains [99, 2, 3, 99] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace_copy.html - * \see \c copy - * \see \c replace - * \see \c replace_if - * \see \c replace_copy_if - */ -template - OutputIterator replace_copy(InputIterator first, InputIterator last, - OutputIterator result, const T &old_value, - const T &new_value); - - -/*! \p replace_copy_if copies elements from the range [first, last) to the range - * [result, result + (last-first)), except that any element for which \p pred - * is \c true is not copied; \p new_value is copied instead. 
- * - * More precisely, for every integer \c n such that 0 <= n < last-first, - * \p replace_copy_if performs the assignment *(result+n) = new_value if - * pred(*(first+n)), and *(result+n) = *(first+n) otherwise. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence to copy from. - * \param last The end of the sequence to copy from. - * \param result The beginning of the sequence to copy to. - * \param pred The predicate to test on every value of the range [first,last). - * \param new_value The replacement value to assign pred(*i) evaluates to \c true. - * \return result + (last-first) - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam Predicate is a model of Predicate. - * \tparam T is a model of Assignable, - * and \p T is convertible to \p OutputIterator's \c value_type. - * - * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. - * - * \code - * #include - * #include - * #include - * - * struct is_less_than_zero - * { - * __host__ __device__ - * bool operator()(int x) - * { - * return x < 0; - * } - * }; - * - * ... - * - * thrust::device_vector A(4); - * A[0] = 1; - * A[1] = -3; - * A[2] = 2; - * A[3] = -1; - - * thrust::device_vector B(4); - * is_less_than_zero pred; - * - * thrust::replace_copy_if(thrust::device, A.begin(), A.end(), B.begin(), pred, 0); - * - * // B contains [1, 0, 2, 0] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace_copy_if.html - * \see \c replace - * \see \c replace_if - * \see \c replace_copy - */ -template -__host__ __device__ - OutputIterator replace_copy_if(const thrust::detail::execution_policy_base &exec, - InputIterator first, InputIterator last, - OutputIterator result, - Predicate pred, - const T &new_value); - - -/*! \p replace_copy_if copies elements from the range [first, last) to the range - * [result, result + (last-first)), except that any element for which \p pred - * is \c true is not copied; \p new_value is copied instead. - * - * More precisely, for every integer \c n such that 0 <= n < last-first, - * \p replace_copy_if performs the assignment *(result+n) = new_value if - * pred(*(first+n)), and *(result+n) = *(first+n) otherwise. - * - * \param first The beginning of the sequence to copy from. - * \param last The end of the sequence to copy from. - * \param result The beginning of the sequence to copy to. - * \param pred The predicate to test on every value of the range [first,last). - * \param new_value The replacement value to assign pred(*i) evaluates to \c true. - * \return result + (last-first) - * - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam Predicate is a model of Predicate. - * \tparam T is a model of Assignable, - * and \p T is convertible to \p OutputIterator's \c value_type. - * - * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. 
- * - * \code - * #include - * #include - * - * struct is_less_than_zero - * { - * __host__ __device__ - * bool operator()(int x) - * { - * return x < 0; - * } - * }; - * - * ... - * - * thrust::device_vector A(4); - * A[0] = 1; - * A[1] = -3; - * A[2] = 2; - * A[3] = -1; - - * thrust::device_vector B(4); - * is_less_than_zero pred; - * - * thrust::replace_copy_if(A.begin(), A.end(), B.begin(), pred, 0); - * - * // B contains [1, 0, 2, 0] - * \endcode - * - * \see http://www.sgi.com/tech/stl/replace_copy_if.html - * \see \c replace - * \see \c replace_if - * \see \c replace_copy - */ -template - OutputIterator replace_copy_if(InputIterator first, InputIterator last, - OutputIterator result, - Predicate pred, - const T &new_value); - - -/*! This version of \p replace_copy_if copies elements from the range [first, last) to the range - * [result, result + (last-first)), except that any element whose corresponding stencil - * element causes \p pred to be \c true is not copied; \p new_value is copied instead. - * - * More precisely, for every integer \c n such that 0 <= n < last-first, - * \p replace_copy_if performs the assignment *(result+n) = new_value if - * pred(*(stencil+n)), and *(result+n) = *(first+n) otherwise. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence to copy from. - * \param last The end of the sequence to copy from. - * \param stencil The beginning of the stencil sequence. - * \param result The beginning of the sequence to copy to. - * \param pred The predicate to test on every value of the range [stencil, stencil + (last - first)). - * \param new_value The replacement value to assign when pred(*s) evaluates to \c true. - * \return result + (last-first) - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator1 is a model of Input Iterator. - * \tparam InputIterator2 is a model of Input Iterator - * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam Predicate is a model of Predicate. - * \tparam T is a model of Assignable, - * and \p T is convertible to \p OutputIterator's \c value_type. - * - * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. - * \pre \p stencil may equal \p result, but the ranges [stencil, stencil + (last - first)) and [result, result + (last - first)) shall not overlap otherwise. - * - * \code - * #include - * #include - * #include - * - * struct is_less_than_zero - * { - * __host__ __device__ - * bool operator()(int x) - * { - * return x < 0; - * } - * }; - * - * ... - * - * thrust::device_vector A(4); - * A[0] = 10; - * A[1] = 20; - * A[2] = 30; - * A[3] = 40; - * - * thrust::device_vector S(4); - * S[0] = -1; - * S[1] = 0; - * S[2] = -1; - * S[3] = 0; - * - * thrust::device_vector B(4); - * is_less_than_zero pred; - * - * thrust::replace_if(thrust::device, A.begin(), A.end(), S.begin(), B.begin(), pred, 0); - * - * // B contains [0, 20, 0, 40] - * \endcode - * - * \see \c replace_copy - * \see \c replace_if - */ -template -__host__ __device__ - OutputIterator replace_copy_if(const thrust::detail::execution_policy_base &exec, - InputIterator1 first, InputIterator1 last, - InputIterator2 stencil, - OutputIterator result, - Predicate pred, - const T &new_value); - - -/*! 
This version of \p replace_copy_if copies elements from the range [first, last) to the range - * [result, result + (last-first)), except that any element whose corresponding stencil - * element causes \p pred to be \c true is not copied; \p new_value is copied instead. - * - * More precisely, for every integer \c n such that 0 <= n < last-first, - * \p replace_copy_if performs the assignment *(result+n) = new_value if - * pred(*(stencil+n)), and *(result+n) = *(first+n) otherwise. - * - * \param first The beginning of the sequence to copy from. - * \param last The end of the sequence to copy from. - * \param stencil The beginning of the stencil sequence. - * \param result The beginning of the sequence to copy to. - * \param pred The predicate to test on every value of the range [stencil, stencil + (last - first)). - * \param new_value The replacement value to assign when pred(*s) evaluates to \c true. - * \return result + (last-first) - * - * \tparam InputIterator1 is a model of Input Iterator. - * \tparam InputIterator2 is a model of Input Iterator - * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam Predicate is a model of Predicate. - * \tparam T is a model of Assignable, - * and \p T is convertible to \p OutputIterator's \c value_type. - * - * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. - * \pre \p stencil may equal \p result, but the ranges [stencil, stencil + (last - first)) and [result, result + (last - first)) shall not overlap otherwise. - * - * \code - * #include - * #include - * - * struct is_less_than_zero - * { - * __host__ __device__ - * bool operator()(int x) - * { - * return x < 0; - * } - * }; - * - * ... - * - * thrust::device_vector A(4); - * A[0] = 10; - * A[1] = 20; - * A[2] = 30; - * A[3] = 40; - * - * thrust::device_vector S(4); - * S[0] = -1; - * S[1] = 0; - * S[2] = -1; - * S[3] = 0; - * - * thrust::device_vector B(4); - * is_less_than_zero pred; - * - * thrust::replace_if(A.begin(), A.end(), S.begin(), B.begin(), pred, 0); - * - * // B contains [0, 20, 0, 40] - * \endcode - * - * \see \c replace_copy - * \see \c replace_if - */ -template - OutputIterator replace_copy_if(InputIterator1 first, InputIterator1 last, - InputIterator2 stencil, - OutputIterator result, - Predicate pred, - const T &new_value); - - -/*! \} // end replacing - * \} // transformations - */ - - -} // end thrust - -#include - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/memory.h b/spaces/CVPR/LIVE/thrust/thrust/system/tbb/memory.h deleted file mode 100644 index a680157006ba126b5ce7b87829bc697a7b7dfcf6..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/memory.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2008-2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! 
\file thrust/system/tbb/memory.h - * \brief Managing memory associated with Thrust's TBB system. - */ - -#pragma once - -#include -#include -#include -#include -#include -#include - -namespace thrust -{ -namespace system -{ -namespace tbb -{ - -/*! Allocates an area of memory available to Thrust's tbb system. - * \param n Number of bytes to allocate. - * \return A tbb::pointer pointing to the beginning of the newly - * allocated memory. A null tbb::pointer is returned if - * an error occurs. - * \note The tbb::pointer returned by this function must be - * deallocated with \p tbb::free. - * \see tbb::free - * \see std::malloc - */ -inline pointer malloc(std::size_t n); - -/*! Allocates a typed area of memory available to Thrust's tbb system. - * \param n Number of elements to allocate. - * \return A tbb::pointer pointing to the beginning of the newly - * allocated memory. A null tbb::pointer is returned if - * an error occurs. - * \note The tbb::pointer returned by this function must be - * deallocated with \p tbb::free. - * \see tbb::free - * \see std::malloc - */ -template -inline pointer malloc(std::size_t n); - -/*! Deallocates an area of memory previously allocated by tbb::malloc. - * \param ptr A tbb::pointer pointing to the beginning of an area - * of memory previously allocated with tbb::malloc. - * \see tbb::malloc - * \see std::free - */ -inline void free(pointer ptr); - -/*! \p tbb::allocator is the default allocator used by the \p tbb system's containers such as - * tbb::vector if no user-specified allocator is provided. \p tbb::allocator allocates - * (deallocates) storage with \p tbb::malloc (\p tbb::free). - */ -template -using allocator = thrust::mr::stateless_resource_allocator; - -} // end tbb - -/*! \} - */ - -} // end system - -/*! \namespace thrust::tbb - * \brief \p thrust::tbb is a top-level alias for thrust::system::tbb. - */ -namespace tbb -{ - -using thrust::system::tbb::malloc; -using thrust::system::tbb::free; -using thrust::system::tbb::allocator; - -} // end tbb - -} // end thrust - -#include - diff --git a/spaces/CVPR/WALT/mmdet/models/detectors/mask_scoring_rcnn.py b/spaces/CVPR/WALT/mmdet/models/detectors/mask_scoring_rcnn.py deleted file mode 100644 index b6252b6e1d234a201725342a5780fade7e21957c..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/detectors/mask_scoring_rcnn.py +++ /dev/null @@ -1,27 +0,0 @@ -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class MaskScoringRCNN(TwoStageDetector): - """Mask Scoring RCNN. 
- - https://arxiv.org/abs/1903.00241 - """ - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None): - super(MaskScoringRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained) diff --git a/spaces/ChrisCaviar/ControlNet-v1-1/app_lineart.py b/spaces/ChrisCaviar/ControlNet-v1-1/app_lineart.py deleted file mode 100644 index 5aefb490d4f3fa858fc1787d90912fdd7f04c22e..0000000000000000000000000000000000000000 --- a/spaces/ChrisCaviar/ControlNet-v1-1/app_lineart.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python - -import gradio as gr - -from utils import randomize_seed_fn - - -def create_demo(process, max_images=12, default_num_images=3): - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - image = gr.Image() - prompt = gr.Textbox(label='Prompt') - run_button = gr.Button('Run') - with gr.Accordion('Advanced options', open=False): - preprocessor_name = gr.Radio( - label='Preprocessor', - choices=[ - 'Lineart', - 'Lineart coarse', - 'None', - 'Lineart (anime)', - 'None (anime)', - ], - type='value', - value='Lineart', - info= - 'Note that "Lineart (anime)" and "None (anime)" are for anime base models like Anything-v3.' - ) - num_samples = gr.Slider(label='Number of images', - minimum=1, - maximum=max_images, - value=default_num_images, - step=1) - image_resolution = gr.Slider(label='Image resolution', - minimum=256, - maximum=512, - value=512, - step=256) - preprocess_resolution = gr.Slider( - label='Preprocess resolution', - minimum=128, - maximum=512, - value=512, - step=1) - num_steps = gr.Slider(label='Number of steps', - minimum=1, - maximum=100, - value=20, - step=1) - guidance_scale = gr.Slider(label='Guidance scale', - minimum=0.1, - maximum=30.0, - value=9.0, - step=0.1) - seed = gr.Slider(label='Seed', - minimum=0, - maximum=1000000, - step=1, - value=0, - randomize=True) - randomize_seed = gr.Checkbox(label='Randomize seed', - value=True) - a_prompt = gr.Textbox( - label='Additional prompt', - value='best quality, extremely detailed') - n_prompt = gr.Textbox( - label='Negative prompt', - value= - 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality' - ) - with gr.Column(): - result = gr.Gallery(label='Output', show_label=False).style( - columns=2, object_fit='scale-down') - inputs = [ - image, - prompt, - a_prompt, - n_prompt, - num_samples, - image_resolution, - preprocess_resolution, - num_steps, - guidance_scale, - seed, - preprocessor_name, - ] - prompt.submit( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - ).then( - fn=process, - inputs=inputs, - outputs=result, - ) - run_button.click( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - ).then( - fn=process, - inputs=inputs, - outputs=result, - api_name='scribble', - ) - return demo - - -if __name__ == '__main__': - from model import Model - model = Model(task_name='lineart') - demo = create_demo(model.process_lineart) - demo.queue().launch() diff --git a/spaces/CosmicSage/Linaqruf-anything-v3.0pruned/README.md b/spaces/CosmicSage/Linaqruf-anything-v3.0pruned/README.md deleted file mode 100644 index 7f77ac6d7634544ef482b621c5d7d5d7c35f8ae0..0000000000000000000000000000000000000000 --- a/spaces/CosmicSage/Linaqruf-anything-v3.0pruned/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Linaqruf Anything V3.0pruned -emoji: 👁 
-colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Cyril666/my_abi/demo.py b/spaces/Cyril666/my_abi/demo.py deleted file mode 100644 index 7dc9bb41a5164cff64686053a06c0435c09f9587..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/my_abi/demo.py +++ /dev/null @@ -1,109 +0,0 @@ -import argparse -import logging -import os -import glob -import tqdm -import torch -import PIL -import cv2 -import numpy as np -import torch.nn.functional as F -from torchvision import transforms -from utils import Config, Logger, CharsetMapper - -def get_model(config): - import importlib - names = config.model_name.split('.') - module_name, class_name = '.'.join(names[:-1]), names[-1] - cls = getattr(importlib.import_module(module_name), class_name) - model = cls(config) - logging.info(model) - model = model.eval() - return model - -def preprocess(img, width, height): - img = cv2.resize(np.array(img), (width, height)) - img = transforms.ToTensor()(img).unsqueeze(0) - mean = torch.tensor([0.485, 0.456, 0.406]) - std = torch.tensor([0.229, 0.224, 0.225]) - return (img-mean[...,None,None]) / std[...,None,None] - -def postprocess(output, charset, model_eval): - def _get_output(last_output, model_eval): - if isinstance(last_output, (tuple, list)): - for res in last_output: - if res['name'] == model_eval: output = res - else: output = last_output - return output - - def _decode(logit): - """ Greed decode """ - out = F.softmax(logit, dim=2) - pt_text, pt_scores, pt_lengths = [], [], [] - for o in out: - text = charset.get_text(o.argmax(dim=1), padding=False, trim=False) - text = text.split(charset.null_char)[0] # end at end-token - pt_text.append(text) - pt_scores.append(o.max(dim=1)[0]) - pt_lengths.append(min(len(text) + 1, charset.max_length)) # one for end-token - return pt_text, pt_scores, pt_lengths - - output = _get_output(output, model_eval) - logits, pt_lengths = output['logits'], output['pt_lengths'] - pt_text, pt_scores, pt_lengths_ = _decode(logits) - - return pt_text, pt_scores, pt_lengths_ - -def load(model, file, device=None, strict=True): - if device is None: device = 'cpu' - elif isinstance(device, int): device = torch.device('cuda', device) - assert os.path.isfile(file) - state = torch.load(file, map_location=device) - if set(state.keys()) == {'model', 'opt'}: - state = state['model'] - model.load_state_dict(state, strict=strict) - return model - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--config', type=str, default='configs/train_abinet.yaml', - help='path to config file') - parser.add_argument('--input', type=str, default='figs/test') - parser.add_argument('--cuda', type=int, default=-1) - parser.add_argument('--checkpoint', type=str, default='workdir/train-abinet/best-train-abinet.pth') - parser.add_argument('--model_eval', type=str, default='alignment', - choices=['alignment', 'vision', 'language']) - args = parser.parse_args() - config = Config(args.config) - if args.checkpoint is not None: config.model_checkpoint = args.checkpoint - if args.model_eval is not None: config.model_eval = args.model_eval - config.global_phase = 'test' - config.model_vision_checkpoint, config.model_language_checkpoint = None, None - device = 'cpu' if args.cuda < 0 else f'cuda:{args.cuda}' - - Logger.init(config.global_workdir, config.global_name, config.global_phase) - Logger.enable_file() - 
logging.info(config) - - logging.info('Construct model.') - model = get_model(config).to(device) - model = load(model, config.model_checkpoint, device=device) - charset = CharsetMapper(filename=config.dataset_charset_path, - max_length=config.dataset_max_length + 1) - - if os.path.isdir(args.input): - paths = [os.path.join(args.input, fname) for fname in os.listdir(args.input)] - else: - paths = glob.glob(os.path.expanduser(args.input)) - assert paths, "The input path(s) was not found" - paths = sorted(paths) - for path in tqdm.tqdm(paths): - img = PIL.Image.open(path).convert('RGB') - img = preprocess(img, config.dataset_image_width, config.dataset_image_height) - img = img.to(device) - res = model(img) - pt_text, _, __ = postprocess(res, charset, config.model_eval) - logging.info(f'{path}: {pt_text[0]}') - -if __name__ == '__main__': - main() diff --git a/spaces/DCXGAO/DeepDanbooru_string/app.py b/spaces/DCXGAO/DeepDanbooru_string/app.py deleted file mode 100644 index 49019837c9207cc68cb37be0342f3bc44fd0decb..0000000000000000000000000000000000000000 --- a/spaces/DCXGAO/DeepDanbooru_string/app.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import argparse -import functools -import os -import html -import pathlib -import tarfile - -import deepdanbooru as dd -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image -import tensorflow as tf -import piexif -import piexif.helper - -TITLE = 'DeepDanbooru String' - -TOKEN = os.environ['TOKEN'] -MODEL_REPO = 'CikeyQI/DeepDanbooru_string' -MODEL_FILENAME = 'model-resnet_custom_v3.h5' -LABEL_FILENAME = 'tags.txt' - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--score-slider-step', type=float, default=0.05) - parser.add_argument('--score-threshold', type=float, default=0.5) - parser.add_argument('--theme', type=str, default='dark-grass') - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - return parser.parse_args() - - -def load_sample_image_paths() -> list[pathlib.Path]: - image_dir = pathlib.Path('images') - if not image_dir.exists(): - dataset_repo = 'hysts/sample-images-TADNE' - path = huggingface_hub.hf_hub_download(dataset_repo, - 'images.tar.gz', - repo_type='dataset', - use_auth_token=TOKEN) - with tarfile.open(path) as f: - f.extractall() - return sorted(image_dir.glob('*')) - - -def load_model() -> tf.keras.Model: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - MODEL_FILENAME, - use_auth_token=TOKEN) - model = tf.keras.models.load_model(path) - return model - - -def load_labels() -> list[str]: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - LABEL_FILENAME, - use_auth_token=TOKEN) - with open(path) as f: - labels = [line.strip() for line in f.readlines()] - return labels - -def plaintext_to_html(text): - text = "
<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>
" - return text - -def predict(image: PIL.Image.Image, score_threshold: float, - model: tf.keras.Model, labels: list[str]) -> dict[str, float]: - rawimage = image - _, height, width, _ = model.input_shape - image = np.asarray(image) - image = tf.image.resize(image, - size=(height, width), - method=tf.image.ResizeMethod.AREA, - preserve_aspect_ratio=True) - image = image.numpy() - image = dd.image.transform_and_pad_image(image, width, height) - image = image / 255. - probs = model.predict(image[None, ...])[0] - probs = probs.astype(float) - res = dict() - for prob, label in zip(probs.tolist(), labels): - if prob < score_threshold: - continue - res[label] = prob - b = dict(sorted(res.items(),key=lambda item:item[1], reverse=True)) - a = ', '.join(list(b.keys())).replace('_',' ').replace('(','\(').replace(')','\)') - c = ', '.join(list(b.keys())) - - items = rawimage.info - geninfo = '' - - if "exif" in rawimage.info: - exif = piexif.load(rawimage.info["exif"]) - exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'') - try: - exif_comment = piexif.helper.UserComment.load(exif_comment) - except ValueError: - exif_comment = exif_comment.decode('utf8', errors="ignore") - - items['exif comment'] = exif_comment - geninfo = exif_comment - - for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif', - 'loop', 'background', 'timestamp', 'duration']: - items.pop(field, None) - - geninfo = items.get('parameters', geninfo) - - info = f""" -

<p><h4>PNG Info</h4></p>

-""" - for key, text in items.items(): - info += f""" -
<div>
-<p><b>{plaintext_to_html(str(key))}</b></p>
-<p>{plaintext_to_html(str(text))}</p>
-</div>
-""".strip()+"\n" - - if len(info) == 0: - message = "Nothing found in the image." - info = f"

<div><p>{message}</p></div>

" - - return (a,c,res,info) - - -def main(): - args = parse_args() - model = load_model() - labels = load_labels() - - func = functools.partial(predict, model=model, labels=labels) - func = functools.update_wrapper(func, predict) - - gr.Interface( - func, - [ - gr.inputs.Image(type='pil', label='Input'), - gr.inputs.Slider(0, - 1, - step=args.score_slider_step, - default=args.score_threshold, - label='Score Threshold'), - ], - [ - gr.outputs.Textbox(label='Output (string)'), - gr.outputs.Textbox(label='Output (raw string)'), - gr.outputs.Label(label='Output (label)'), - gr.outputs.HTML() - ], - examples=[ - ['miku.jpg',0.5], - ['miku2.jpg',0.5] - ], - title=TITLE, - description=''' -Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer. - -Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru) - -PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - ''', - theme=args.theme, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageMode.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageMode.py deleted file mode 100644 index a0b33514296df734501c553493b0a535eca49046..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageMode.py +++ /dev/null @@ -1,90 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# standard mode descriptors -# -# History: -# 2006-03-20 fl Added -# -# Copyright (c) 2006 by Secret Labs AB. -# Copyright (c) 2006 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. 
-# - -import sys - -# mode descriptor cache -_modes = None - - -class ModeDescriptor: - """Wrapper for mode strings.""" - - def __init__(self, mode, bands, basemode, basetype, typestr): - self.mode = mode - self.bands = bands - self.basemode = basemode - self.basetype = basetype - self.typestr = typestr - - def __str__(self): - return self.mode - - -def getmode(mode): - """Gets a mode descriptor for the given mode.""" - global _modes - if not _modes: - # initialize mode cache - modes = {} - endian = "<" if sys.byteorder == "little" else ">" - for m, (basemode, basetype, bands, typestr) in { - # core modes - # Bits need to be extended to bytes - "1": ("L", "L", ("1",), "|b1"), - "L": ("L", "L", ("L",), "|u1"), - "I": ("L", "I", ("I",), endian + "i4"), - "F": ("L", "F", ("F",), endian + "f4"), - "P": ("P", "L", ("P",), "|u1"), - "RGB": ("RGB", "L", ("R", "G", "B"), "|u1"), - "RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"), - "RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"), - "CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"), - "YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"), - # UNDONE - unsigned |u1i1i1 - "LAB": ("RGB", "L", ("L", "A", "B"), "|u1"), - "HSV": ("RGB", "L", ("H", "S", "V"), "|u1"), - # extra experimental modes - "RGBa": ("RGB", "L", ("R", "G", "B", "a"), "|u1"), - "BGR;15": ("RGB", "L", ("B", "G", "R"), "|u1"), - "BGR;16": ("RGB", "L", ("B", "G", "R"), "|u1"), - "BGR;24": ("RGB", "L", ("B", "G", "R"), "|u1"), - "LA": ("L", "L", ("L", "A"), "|u1"), - "La": ("L", "L", ("L", "a"), "|u1"), - "PA": ("RGB", "L", ("P", "A"), "|u1"), - }.items(): - modes[m] = ModeDescriptor(m, bands, basemode, basetype, typestr) - # mapping modes - for i16mode, typestr in { - # I;16 == I;16L, and I;32 == I;32L - "I;16": "u2", - "I;16BS": ">i2", - "I;16N": endian + "u2", - "I;16NS": endian + "i2", - "I;32": "u4", - "I;32L": "i4", - "I;32LS": " 0: - self.last_model_path = os.path.join(self.ckpt_dir, last_model_lst[-1]) - checkpoint = torch.load(self.last_model_path, map_location=torch.device(device)) - self.best_accuracy = checkpoint['accuracy'] - self.acc_d = checkpoint['acc_d'] - - if len(best_model_lst) > 0: - self.best_model_path = os.path.join(self.ckpt_dir, best_model_lst[-1]) - best_checkpoint = torch.load(self.best_model_path, map_location=torch.device(device)) - self.best_accuracy = best_checkpoint['accuracy'] - self.acc_d = best_checkpoint['acc_d'] - if best: - checkpoint = best_checkpoint - - for k in self.acc_d: - if isinstance(self.acc_d[k], float): - self.acc_d[k] = { - 'acc': self.acc_d[k], - 'epoch': checkpoint['epoch'] - } - - if checkpoint is None: - logger.error("Invalid checkpoint") - return - - self.load_state_dict(checkpoint['net'], strict=False) - if optimizer and not best: # best的时候使用新的优化器比如从adam->sgd - logger.info('Load optimizer') - optimizer.load_state_dict(checkpoint['optimizer']) - for state in optimizer.state.values(): - for k, v in state.items(): - if torch.is_tensor(v): - state[k] = v.to(device) - - logger.info('*'*50) - if best: - logger.info(f"Lode best: {self.best_model_path}") - else: - logger.info(f"Lode last: {self.last_model_path}") - - logger.info(f"Best accuracy: {self.best_accuracy}") - logger.info(f"Last epoch: {checkpoint['epoch'] + 1}") - logger.info('*'*50) - return checkpoint['epoch'] + 1 - - def update_acc(self, acc_d, epoch, logger): - logger.info("-" * 100) - for k in acc_d: - if k not in self.acc_d.keys() or acc_d[k] > self.acc_d[k]['acc']: - self.acc_d[k] = { - 'acc': acc_d[k], - 'epoch': epoch - } - logger.info(f"Update ACC: {k} 
{self.acc_d[k]['acc']:.4f}({self.acc_d[k]['epoch']}-{epoch})") - logger.info("-" * 100) - - def save(self, optim, epoch, accuracy, logger, replace=True, acc_d=None, config=None): - """ - - :param config: - :param optim: - :param epoch: - :param accuracy: - :param logger: - :param replace: - :param acc_d: 其他评估数据,visible_2/3d, full_2/3d, rmse... - :return: - """ - if acc_d: - self.update_acc(acc_d, epoch, logger) - name = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S_last_{:.4f}_{}'.format(accuracy, epoch)) - name = f"model_{name}.pkl" - checkpoint = { - 'net': self.state_dict(), - 'optimizer': optim.state_dict(), - 'epoch': epoch, - 'accuracy': accuracy, - 'acc_d': acc_d - } - # FIXME:: delete always true - if (True or config.MODEL.SAVE_LAST) and epoch % config.TRAIN.SAVE_FREQ == 0: - if replace and self.last_model_path and os.path.exists(self.last_model_path): - os.remove(self.last_model_path) - self.last_model_path = os.path.join(self.ckpt_dir, name) - torch.save(checkpoint, self.last_model_path) - logger.info(f"Saved last model: {self.last_model_path}") - - if accuracy > self.best_accuracy: - self.best_accuracy = accuracy - # FIXME:: delete always true - if True or config.MODEL.SAVE_BEST: - if replace and self.best_model_path and os.path.exists(self.best_model_path): - os.remove(self.best_model_path) - self.best_model_path = os.path.join(self.ckpt_dir, name.replace('last', 'best')) - torch.save(checkpoint, self.best_model_path) - logger.info("#" * 100) - logger.info(f"Saved best model: {self.best_model_path}") - logger.info("#" * 100) \ No newline at end of file diff --git a/spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/denoiser/stft_loss.py b/spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/denoiser/stft_loss.py deleted file mode 100644 index 9a4b4c8dab1d4f8f53aa444f86a5c7132fa7b99f..0000000000000000000000000000000000000000 --- a/spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/denoiser/stft_loss.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# Original copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""STFT-based Loss modules.""" - -import torch -import torch.nn.functional as F - - -def stft(x, fft_size, hop_size, win_length, window): - """Perform STFT and convert to magnitude spectrogram. - Args: - x (Tensor): Input signal tensor (B, T). - fft_size (int): FFT size. - hop_size (int): Hop size. - win_length (int): Window length. - window (str): Window function type. - Returns: - Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). - """ - x_stft = torch.stft(x, fft_size, hop_size, win_length, window) - real = x_stft[..., 0] - imag = x_stft[..., 1] - - # NOTE(kan-bayashi): clamp is needed to avoid nan or inf - return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1) - - -class SpectralConvergengeLoss(torch.nn.Module): - """Spectral convergence loss module.""" - - def __init__(self): - """Initilize spectral convergence loss module.""" - super(SpectralConvergengeLoss, self).__init__() - - def forward(self, x_mag, y_mag): - """Calculate forward propagation. - Args: - x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). - y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). 
- Returns: - Tensor: Spectral convergence loss value. - """ - return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro") - - -class LogSTFTMagnitudeLoss(torch.nn.Module): - """Log STFT magnitude loss module.""" - - def __init__(self): - """Initilize los STFT magnitude loss module.""" - super(LogSTFTMagnitudeLoss, self).__init__() - - def forward(self, x_mag, y_mag): - """Calculate forward propagation. - Args: - x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). - y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). - Returns: - Tensor: Log STFT magnitude loss value. - """ - return F.l1_loss(torch.log(y_mag), torch.log(x_mag)) - - -class STFTLoss(torch.nn.Module): - """STFT loss module.""" - - def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"): - """Initialize STFT loss module.""" - super(STFTLoss, self).__init__() - self.fft_size = fft_size - self.shift_size = shift_size - self.win_length = win_length - self.window = getattr(torch, window)(win_length) - self.spectral_convergenge_loss = SpectralConvergengeLoss() - self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() - - def forward(self, x, y): - """Calculate forward propagation. - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - Returns: - Tensor: Spectral convergence loss value. - Tensor: Log STFT magnitude loss value. - """ - x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) - y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) - sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) - mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) - - return sc_loss, mag_loss - - -class MultiResolutionSTFTLoss(torch.nn.Module): - """Multi resolution STFT loss module.""" - - def __init__(self, - fft_sizes=[1024, 2048, 512], - hop_sizes=[120, 240, 50], - win_lengths=[600, 1200, 240], - window="hann_window", factor_sc=0.1, factor_mag=0.1): - """Initialize Multi resolution STFT loss module. - Args: - fft_sizes (list): List of FFT sizes. - hop_sizes (list): List of hop sizes. - win_lengths (list): List of window lengths. - window (str): Window function type. - factor (float): a balancing factor across different losses. - """ - super(MultiResolutionSTFTLoss, self).__init__() - assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) - self.stft_losses = torch.nn.ModuleList() - for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): - self.stft_losses += [STFTLoss(fs, ss, wl, window)] - self.factor_sc = factor_sc - self.factor_mag = factor_mag - - def forward(self, x, y): - """Calculate forward propagation. - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - Returns: - Tensor: Multi resolution spectral convergence loss value. - Tensor: Multi resolution log STFT magnitude loss value. 
- """ - sc_loss = 0.0 - mag_loss = 0.0 - for f in self.stft_losses: - sc_l, mag_l = f(x, y) - sc_loss += sc_l - mag_loss += mag_l - sc_loss /= len(self.stft_losses) - mag_loss /= len(self.stft_losses) - - return self.factor_sc*sc_loss, self.factor_mag*mag_loss diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GUI.py b/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GUI.py deleted file mode 100644 index 19f7f8cce9305819b22664642799200d9e1cfff0..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GUI.py +++ /dev/null @@ -1,103 +0,0 @@ - - -from tkinter import Tk,Frame ,Label,Button,messagebox,Canvas,Text,Scale -from tkinter import HORIZONTAL - -class View(): - def __init__(self,master): - - self.width=600 - self.height=600 - - - self.root=master - self.root.geometry("600x600") - - self.left_frame=Frame(self.root,width=600) - self.left_frame.pack_propagate(0) - self.left_frame.pack(fill='both', side='left', expand='True') - - self.retrieval_frame=Frame(self.root,bg='snow3') - self.retrieval_frame.pack_propagate(0) - self.retrieval_frame.pack(fill='both', side='right', expand='True') - - self.bg_frame=Frame(self.left_frame,bg='snow3',height=600,width=600) - self.bg_frame.pack_propagate(0) - self.bg_frame.pack(fill='both', side='top', expand='True') - - self.command_frame=Frame(self.left_frame,bg='snow3') - self.command_frame.pack_propagate(0) - self.command_frame.pack(fill='both', side='bottom', expand='True') -# self.command_frame.grid(row=1, column=0,padx=0, pady=0) - - self.bg=Canvas(self.bg_frame,width=self.width,height=self.height, bg='gray') - self.bg.place(relx=0.5, rely=0.5, anchor='center') - - self.mani=Canvas(self.retrieval_frame,width=1024,height=1024, bg='gray') - self.mani.grid(row=0, column=0,padx=0, pady=42) - - self.SetCommand() - - - - - def run(self): - self.root.mainloop() - - def helloCallBack(self): - category=self.set_category.get() - messagebox.showinfo( "Hello Python",category) - - def SetCommand(self): - - tmp = Label(self.command_frame, text="neutral", width=10 ,bg='snow3') - tmp.grid(row=1, column=0,padx=10, pady=10) - - tmp = Label(self.command_frame, text="a photo of a", width=10 ,bg='snow3') - tmp.grid(row=1, column=1,padx=10, pady=10) - - self.neutral = Text ( self.command_frame, height=2, width=30) - self.neutral.grid(row=1, column=2,padx=10, pady=10) - - - tmp = Label(self.command_frame, text="target", width=10 ,bg='snow3') - tmp.grid(row=2, column=0,padx=10, pady=10) - - tmp = Label(self.command_frame, text="a photo of a", width=10 ,bg='snow3') - tmp.grid(row=2, column=1,padx=10, pady=10) - - self.target = Text ( self.command_frame, height=2, width=30) - self.target.grid(row=2, column=2,padx=10, pady=10) - - tmp = Label(self.command_frame, text="strength", width=10 ,bg='snow3') - tmp.grid(row=3, column=0,padx=10, pady=10) - - self.alpha = Scale(self.command_frame, from_=-15, to=25, orient=HORIZONTAL,bg='snow3', length=250,resolution=0.01) - self.alpha.grid(row=3, column=2,padx=10, pady=10) - - - tmp = Label(self.command_frame, text="disentangle", width=10 ,bg='snow3') - tmp.grid(row=4, column=0,padx=10, pady=10) - - self.beta = Scale(self.command_frame, from_=0.08, to=0.4, orient=HORIZONTAL,bg='snow3', length=250,resolution=0.001) - self.beta.grid(row=4, column=2,padx=10, pady=10) - - self.reset = Button(self.command_frame, text='Reset') - self.reset.grid(row=5, column=1,padx=10, pady=10) - - - self.set_init = 
Button(self.command_frame, text='Accept') - self.set_init.grid(row=5, column=2,padx=10, pady=10) - -#%% -if __name__ == "__main__": - master=Tk() - self=View(master) - self.run() - - - - - - - \ No newline at end of file diff --git a/spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/__init__.py b/spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/__init__.py deleted file mode 100644 index bfea78f284116dee22510d4aa91f9e44afb7d472..0000000000000000000000000000000000000000 --- a/spaces/EXPOSUREEE/Ai-Image-Enhancer/realesrgan/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# flake8: noqa -from .archs import * -from .data import * -from .models import * -from .utils import * -#from .version import * diff --git a/spaces/Eddycrack864/Applio-Inference/infer/lib/infer_pack/models.py b/spaces/Eddycrack864/Applio-Inference/infer/lib/infer_pack/models.py deleted file mode 100644 index 7a387b888f63ecd6f1f1bd3ed10aa2176a944d2c..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/infer/lib/infer_pack/models.py +++ /dev/null @@ -1,1174 +0,0 @@ -import math -import logging - -logger = logging.getLogger(__name__) - -import numpy as np -import torch -from torch import nn -from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d -from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm - -from infer.lib.infer_pack import attentions, commons, modules -from infer.lib.infer_pack.commons import get_padding, init_weights -has_xpu = bool(hasattr(torch, "xpu") and torch.xpu.is_available()) - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, 
filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - 
upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - if uv.device.type == "privateuseone": # for DirectML - uv = uv.float() - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - 
).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - if hasattr(self, "ddtype") == False: - self.ddtype = self.l_linear.weight.dtype - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - # print(x.dtype,sine_wavs.dtype,self.l_linear.weight.dtype) - # if self.is_half: - # sine_wavs = sine_wavs.half() - # sine_merge = self.l_tanh(self.l_linear(sine_wavs.to(x))) - # print(sine_wavs.dtype,self.ddtype) - if sine_wavs.dtype != self.ddtype: - sine_wavs = sine_wavs.to(self.ddtype) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 
1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - logger.debug( 
- "gin_channels: " - + str(gin_channels) - + ", self.spk_embed_dim: " - + str(self.spk_embed_dim) - ) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - logger.debug( - "gin_channels: " - + str(gin_channels) - + ", self.spk_embed_dim: " - + str(self.spk_embed_dim) - ) - - def remove_weight_norm(self): - 
self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - logger.debug( - "gin_channels: " - + str(gin_channels) - + ", self.spk_embed_dim: " - + str(self.spk_embed_dim) - ) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 
256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - logger.debug( - "gin_channels: " - + str(gin_channels) - + ", self.spk_embed_dim: " - + str(self.spk_embed_dim) - ) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + 
torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - 
padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - if has_xpu and x.dtype == torch.bfloat16: - x = F.pad(x.to(dtype=torch.float16), (0, n_pad), "reflect").to(dtype=torch.bfloat16) - else: - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/realesrgan/models/realesrnet_model.py b/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/realesrgan/models/realesrnet_model.py deleted file mode 100644 index d11668f3712bffcd062c57db14d22ca3a0e1e59d..0000000000000000000000000000000000000000 --- a/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/realesrgan/models/realesrnet_model.py +++ /dev/null @@ -1,188 +0,0 @@ -import numpy as np -import random -import torch -from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt -from basicsr.data.transforms import paired_random_crop -from basicsr.models.sr_model import SRModel -from basicsr.utils import DiffJPEG, USMSharp -from basicsr.utils.img_process_util import filter2D -from basicsr.utils.registry import MODEL_REGISTRY -from torch.nn import functional as F - - -@MODEL_REGISTRY.register() -class RealESRNetModel(SRModel): - """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. - - It is trained without GAN losses. - It mainly performs: - 1. randomly synthesize LQ images in GPU tensors - 2. optimize the networks with GAN training. - """ - - def __init__(self, opt): - super(RealESRNetModel, self).__init__(opt) - self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts - self.usm_sharpener = USMSharp().cuda() # do usm sharpening - self.queue_size = opt.get('queue_size', 180) - - @torch.no_grad() - def _dequeue_and_enqueue(self): - """It is the training pair pool for increasing the diversity in a batch. - - Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a - batch could not have different resize scaling factors. Therefore, we employ this training pair pool - to increase the degradation diversity in a batch. 
- """ - # initialize - b, c, h, w = self.lq.size() - if not hasattr(self, 'queue_lr'): - assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' - self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() - _, c, h, w = self.gt.size() - self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() - self.queue_ptr = 0 - if self.queue_ptr == self.queue_size: # the pool is full - # do dequeue and enqueue - # shuffle - idx = torch.randperm(self.queue_size) - self.queue_lr = self.queue_lr[idx] - self.queue_gt = self.queue_gt[idx] - # get first b samples - lq_dequeue = self.queue_lr[0:b, :, :, :].clone() - gt_dequeue = self.queue_gt[0:b, :, :, :].clone() - # update the queue - self.queue_lr[0:b, :, :, :] = self.lq.clone() - self.queue_gt[0:b, :, :, :] = self.gt.clone() - - self.lq = lq_dequeue - self.gt = gt_dequeue - else: - # only do enqueue - self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() - self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() - self.queue_ptr = self.queue_ptr + b - - @torch.no_grad() - def feed_data(self, data): - """Accept data from dataloader, and then add two-order degradations to obtain LQ images. - """ - if self.is_train and self.opt.get('high_order_degradation', True): - # training data synthesis - self.gt = data['gt'].to(self.device) - # USM sharpen the GT images - if self.opt['gt_usm'] is True: - self.gt = self.usm_sharpener(self.gt) - - self.kernel1 = data['kernel1'].to(self.device) - self.kernel2 = data['kernel2'].to(self.device) - self.sinc_kernel = data['sinc_kernel'].to(self.device) - - ori_h, ori_w = self.gt.size()[2:4] - - # ----------------------- The first degradation process ----------------------- # - # blur - out = filter2D(self.gt, self.kernel1) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, scale_factor=scale, mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob'] - if np.random.uniform() < self.opt['gaussian_noise_prob']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) - out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts - out = self.jpeger(out, quality=jpeg_p) - - # ----------------------- The second degradation process ----------------------- # - # blur - if np.random.uniform() < self.opt['second_blur_prob']: - out = filter2D(out, self.kernel2) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range2'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range2'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate( - out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) - # add 
noise - gray_noise_prob = self.opt['gray_noise_prob2'] - if np.random.uniform() < self.opt['gaussian_noise_prob2']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range2'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - - # JPEG compression + the final sinc filter - # We also need to resize images to desired sizes. We group [resize back + sinc filter] together - # as one operation. - # We consider two orders: - # 1. [resize back + sinc filter] + JPEG compression - # 2. JPEG compression + [resize back + sinc filter] - # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. - if np.random.uniform() < 0.5: - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - else: - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - - # clamp and round - self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. - - # random crop - gt_size = self.opt['gt_size'] - self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale']) - - # training pair pool - self._dequeue_and_enqueue() - self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract - else: - # for paired training or validation - self.lq = data['lq'].to(self.device) - if 'gt' in data: - self.gt = data['gt'].to(self.device) - self.gt_usm = self.usm_sharpener(self.gt) - - def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): - # do not use the synthetic process during validation - self.is_train = False - super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) - self.is_train = True diff --git a/spaces/ElainaFanBoy/MusicGen/audiocraft/utils/utils.py b/spaces/ElainaFanBoy/MusicGen/audiocraft/utils/utils.py deleted file mode 100644 index 86e1448d065fa182ca69aae00d2f2a7eea55d8a4..0000000000000000000000000000000000000000 --- a/spaces/ElainaFanBoy/MusicGen/audiocraft/utils/utils.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from concurrent.futures import ProcessPoolExecutor -from functools import wraps -import hashlib -import logging -import typing as tp - -import flashy -import flashy.distrib -import omegaconf -import torch -from torch.nn.utils.rnn import pad_sequence - - -logger = logging.getLogger(__name__) - - -def dict_from_config(cfg: omegaconf.DictConfig) -> dict: - """Convenience function to map an omegaconf configuration to a dictionary. - - Args: - cfg (omegaconf.DictConfig): Original configuration to map to dict. 
- Returns: - dict: Config as dictionary object. - """ - dct = omegaconf.OmegaConf.to_container(cfg, resolve=True) - assert isinstance(dct, dict) - return dct - - -def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset: - if max_samples >= len(dataset): - return dataset - - generator = torch.Generator().manual_seed(seed) - perm = torch.randperm(len(dataset), generator=generator) - return torch.utils.data.Subset(dataset, perm[:max_samples].tolist()) - - -def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int, - num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader: - """Convenience function to load dataset into a dataloader with optional subset sampling. - - Args: - dataset: Dataset to load. - num_samples (Optional[int]): Number of samples to limit subset size. - batch_size (int): Batch size. - num_workers (int): Number of workers for data loading. - seed (int): Random seed. - """ - if num_samples is not None: - dataset = random_subset(dataset, num_samples, seed) - - dataloader = flashy.distrib.loader( - dataset, - batch_size=batch_size, - num_workers=num_workers, - **kwargs - ) - return dataloader - - -def get_dataset_from_loader(dataloader): - dataset = dataloader.dataset - if isinstance(dataset, torch.utils.data.Subset): - return dataset.dataset - else: - return dataset - - -def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None): - """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension. - - Args: - input (torch.Tensor): The input tensor containing probabilities. - num_samples (int): Number of samples to draw. - replacement (bool): Whether to draw with replacement or not. - Keywords args: - generator (torch.Generator): A pseudorandom number generator for sampling. - Returns: - torch.Tensor: Last dimension contains num_samples indices - sampled from the multinomial probability distribution - located in the last dimension of tensor input. - """ - input_ = input.reshape(-1, input.shape[-1]) - output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator) - output = output_.reshape(*list(input.shape[:-1]), -1) - return output - - -def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor: - """Sample next token from top K values along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - k (int): The k in “top-k”. - Returns: - torch.Tensor: Sampled tokens. - """ - top_k_value, _ = torch.topk(probs, k, dim=-1) - min_value_top_k = top_k_value[..., [-1]] - probs *= (probs >= min_value_top_k).float() - probs.div_(probs.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs, num_samples=1) - return next_token - - -def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor: - """Sample next token from top P probabilities along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - p (int): The p in “top-p”. - Returns: - torch.Tensor: Sampled tokens. 
- """ - probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True) - probs_sum = torch.cumsum(probs_sort, dim=-1) - mask = probs_sum - probs_sort > p - probs_sort *= (~mask).float() - probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs_sort, num_samples=1) - next_token = torch.gather(probs_idx, -1, next_token) - return next_token - - -class DummyPoolExecutor: - """Dummy pool executor to use when we actually have only 1 worker. - (e.g. instead of ProcessPoolExecutor). - """ - class DummyResult: - def __init__(self, func, *args, **kwargs): - self.func = func - self.args = args - self.kwargs = kwargs - - def result(self): - return self.func(*self.args, **self.kwargs) - - def __init__(self, workers, mp_context=None): - pass - - def submit(self, func, *args, **kwargs): - return DummyPoolExecutor.DummyResult(func, *args, **kwargs) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - return - - -def get_pool_executor(num_workers: int, mp_context=None): - return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1) - - -def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor: - """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences). - For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]] - - Args: - lengths (torch.Tensor): tensor with lengths - max_len (int): can set the max length manually. Defaults to None. - Returns: - torch.Tensor: mask with 0s where there is pad tokens else 1s - """ - assert len(lengths.shape) == 1, "Length shape should be 1 dimensional." - final_length = lengths.max().item() if not max_len else max_len - final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor - return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None] - - -def hash_trick(word: str, vocab_size: int) -> int: - """Hash trick to pair each word with an index - - Args: - word (str): word we wish to convert to an index - vocab_size (int): size of the vocabulary - Returns: - int: index of the word in the embedding LUT - """ - hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16) - return hash % vocab_size - - -def with_rank_rng(base_seed: int = 1234): - """Decorator for a function so that the function will use a Random Number Generator - whose state depend on the GPU rank. The original RNG state is restored upon returning. - - Args: - base_seed (int): Random seed. - """ - def _decorator(fun: tp.Callable): - @wraps(fun) - def _decorated(*args, **kwargs): - state = torch.get_rng_state() - seed = base_seed ^ flashy.distrib.rank() - torch.manual_seed(seed) - logger.debug('Rank dependent seed set to %d', seed) - try: - return fun(*args, **kwargs) - finally: - torch.set_rng_state(state) - logger.debug('RNG state restored.') - return _decorated - return _decorator - - -def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Get a list of tensors and collate them to a single tensor. according to the following logic: - - `dim` specifies the time dimension which will be stacked and padded. - - The output will contain 1 new dimension (dimension index 0) which will be the size of - of the original list. - - Args: - tensors (tp.List[torch.Tensor]): List of tensors to collate. - dim (int): Dimension which will be stacked and padded. 
- Returns: - tp.Tuple[torch.Tensor, torch.Tensor]: - torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension - (dimension index 0) which will be the size of the original list. - torch.Tensor: Tensor containing length of original tensor sizes (without padding). - """ - tensors = [x.transpose(0, dim) for x in tensors] - lens = torch.LongTensor([len(x) for x in tensors]) - padded_tensors = pad_sequence(tensors) - padded_tensors = padded_tensors.transpose(0, 1) - padded_tensors = padded_tensors.transpose(1, dim + 1) - return padded_tensors, lens diff --git a/spaces/EsoCode/text-generation-webui/modules/presets.py b/spaces/EsoCode/text-generation-webui/modules/presets.py deleted file mode 100644 index d8ae6e192326363a905ef25ebcb3d89c63a4de6d..0000000000000000000000000000000000000000 --- a/spaces/EsoCode/text-generation-webui/modules/presets.py +++ /dev/null @@ -1,55 +0,0 @@ -import functools -from pathlib import Path - -import yaml - - -def load_preset(name): - generate_params = { - 'do_sample': True, - 'temperature': 1, - 'top_p': 1, - 'typical_p': 1, - 'epsilon_cutoff': 0, - 'eta_cutoff': 0, - 'tfs': 1, - 'top_a': 0, - 'repetition_penalty': 1, - 'repetition_penalty_range': 0, - 'encoder_repetition_penalty': 1, - 'top_k': 0, - 'num_beams': 1, - 'penalty_alpha': 0, - 'min_length': 0, - 'length_penalty': 1, - 'no_repeat_ngram_size': 0, - 'early_stopping': False, - 'mirostat_mode': 0, - 'mirostat_tau': 5.0, - 'mirostat_eta': 0.1, - } - - with open(Path(f'presets/{name}.yaml'), 'r') as infile: - preset = yaml.safe_load(infile) - - for k in preset: - generate_params[k] = preset[k] - - generate_params['temperature'] = min(1.99, generate_params['temperature']) - return generate_params - - -@functools.cache -def load_preset_memoized(name): - return load_preset(name) - - -def load_preset_for_ui(name, state): - generate_params = load_preset(name) - state.update(generate_params) - return state, *[generate_params[k] for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'epsilon_cutoff', 'eta_cutoff', 'repetition_penalty', 'repetition_penalty_range', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping', 'mirostat_mode', 'mirostat_tau', 'mirostat_eta', 'tfs', 'top_a']] - - -def generate_preset_yaml(state): - data = {k: state[k] for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'epsilon_cutoff', 'eta_cutoff', 'repetition_penalty', 'repetition_penalty_range', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping', 'mirostat_mode', 'mirostat_tau', 'mirostat_eta', 'tfs', 'top_a']} - return yaml.dump(data, sort_keys=False) diff --git a/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_pipelines/panet_pipeline.py b/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_pipelines/panet_pipeline.py deleted file mode 100644 index eae50de4fab0536d114509854f9250c0d613cb3c..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/_base_/det_pipelines/panet_pipeline.py +++ /dev/null @@ -1,156 +0,0 @@ -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# for ctw1500 -img_scale_train_ctw1500 = [(3000, 640)] -shrink_ratio_train_ctw1500 = (1.0, 0.7) -target_size_train_ctw1500 = (640, 640) -train_pipeline_ctw1500 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - 
with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5), - dict(type='Normalize', **img_norm_cfg), - dict( - type='ScaleAspectJitter', - img_scale=img_scale_train_ctw1500, - ratio_range=(0.7, 1.3), - aspect_ratio_range=(0.9, 1.1), - multiscale_mode='value', - keep_ratio=False), - # shrink_ratio is from big to small. The 1st must be 1.0 - dict(type='PANetTargets', shrink_ratio=shrink_ratio_train_ctw1500), - dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'), - dict(type='RandomRotateTextDet'), - dict( - type='RandomCropInstances', - target_size=target_size_train_ctw1500, - instance_key='gt_kernels'), - dict(type='Pad', size_divisor=32), - dict( - type='CustomFormatBundle', - keys=['gt_kernels', 'gt_mask'], - visualize=dict(flag=False, boundary_key='gt_kernels')), - dict(type='Collect', keys=['img', 'gt_kernels', 'gt_mask']) -] - -img_scale_test_ctw1500 = (3000, 640) -test_pipeline_ctw1500 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale_test_ctw1500, # used by Resize - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# for icdar2015 -img_scale_train_icdar2015 = [(3000, 736)] -shrink_ratio_train_icdar2015 = (1.0, 0.5) -target_size_train_icdar2015 = (736, 736) -train_pipeline_icdar2015 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5), - dict(type='Normalize', **img_norm_cfg), - dict( - type='ScaleAspectJitter', - img_scale=img_scale_train_icdar2015, - ratio_range=(0.7, 1.3), - aspect_ratio_range=(0.9, 1.1), - multiscale_mode='value', - keep_ratio=False), - dict(type='PANetTargets', shrink_ratio=shrink_ratio_train_icdar2015), - dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'), - dict(type='RandomRotateTextDet'), - dict( - type='RandomCropInstances', - target_size=target_size_train_icdar2015, - instance_key='gt_kernels'), - dict(type='Pad', size_divisor=32), - dict( - type='CustomFormatBundle', - keys=['gt_kernels', 'gt_mask'], - visualize=dict(flag=False, boundary_key='gt_kernels')), - dict(type='Collect', keys=['img', 'gt_kernels', 'gt_mask']) -] - -img_scale_test_icdar2015 = (1333, 736) -test_pipeline_icdar2015 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale_test_icdar2015, # used by Resize - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# for icdar2017 -img_scale_train_icdar2017 = [(3000, 800)] -shrink_ratio_train_icdar2017 = (1.0, 0.5) -target_size_train_icdar2017 = (800, 800) -train_pipeline_icdar2017 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5), - dict(type='Normalize', **img_norm_cfg), - dict( - type='ScaleAspectJitter', - img_scale=img_scale_train_icdar2017, - ratio_range=(0.7, 
1.3), - aspect_ratio_range=(0.9, 1.1), - multiscale_mode='value', - keep_ratio=False), - dict(type='PANetTargets', shrink_ratio=shrink_ratio_train_icdar2017), - dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'), - dict(type='RandomRotateTextDet'), - dict( - type='RandomCropInstances', - target_size=target_size_train_icdar2017, - instance_key='gt_kernels'), - dict(type='Pad', size_divisor=32), - dict( - type='CustomFormatBundle', - keys=['gt_kernels', 'gt_mask'], - visualize=dict(flag=False, boundary_key='gt_kernels')), - dict(type='Collect', keys=['img', 'gt_kernels', 'gt_mask']) -] - -img_scale_test_icdar2017 = (1333, 800) -test_pipeline_icdar2017 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale_test_icdar2017, # used by Resize - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/scripts/download_pretrained_models.py b/spaces/FelixLuoX/codeformer/CodeFormer/scripts/download_pretrained_models.py deleted file mode 100644 index daa6e8ca14ea91c89a318e85d9f182eb7d1bf025..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/scripts/download_pretrained_models.py +++ /dev/null @@ -1,40 +0,0 @@ -import argparse -import os -from os import path as osp - -from basicsr.utils.download_util import load_file_from_url - - -def download_pretrained_models(method, file_urls): - save_path_root = f'./weights/{method}' - os.makedirs(save_path_root, exist_ok=True) - - for file_name, file_url in file_urls.items(): - save_path = load_file_from_url(url=file_url, model_dir=save_path_root, progress=True, file_name=file_name) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - - parser.add_argument( - 'method', - type=str, - help=("Options: 'CodeFormer' 'facelib'. Set to 'all' to download all the models.")) - args = parser.parse_args() - - file_urls = { - 'CodeFormer': { - 'codeformer.pth': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth' - }, - 'facelib': { - # 'yolov5l-face.pth': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5l-face.pth', - 'detection_Resnet50_Final.pth': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/detection_Resnet50_Final.pth', - 'parsing_parsenet.pth': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth' - } - } - - if args.method == 'all': - for method in file_urls.keys(): - download_pretrained_models(method, file_urls[method]) - else: - download_pretrained_models(args.method, file_urls[args.method]) \ No newline at end of file diff --git a/spaces/Fengbinbin/gpt-academic/Dockerfile b/spaces/Fengbinbin/gpt-academic/Dockerfile deleted file mode 100644 index da5053dbc7fc0accbd7b10fab87ca72feced8fe8..0000000000000000000000000000000000000000 --- a/spaces/Fengbinbin/gpt-academic/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM -# 如何构建: 先修改 `config.py`, 然后 docker build -t gpt-academic . 
-# 如何运行: docker run --rm -it --net=host gpt-academic -FROM python:3.11 - -RUN echo '[global]' > /etc/pip.conf && \ - echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \ - echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf - - -WORKDIR /gpt -COPY requirements.txt . -RUN pip3 install -r requirements.txt - -COPY . . - -# 可选步骤,用于预热模块 -RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' - -CMD ["python3", "-u", "main.py"] diff --git a/spaces/Ferion/image-matting-app/ppmatting/models/backbone/vgg.py b/spaces/Ferion/image-matting-app/ppmatting/models/backbone/vgg.py deleted file mode 100644 index 64b529bf0c3e25cb82ea4b4c31bec9ef30d2da59..0000000000000000000000000000000000000000 --- a/spaces/Ferion/image-matting-app/ppmatting/models/backbone/vgg.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -from paddle import ParamAttr -import paddle.nn as nn -import paddle.nn.functional as F -from paddle.nn import Conv2D, BatchNorm, Linear, Dropout -from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D - -from paddleseg.cvlibs import manager -from paddleseg.utils import utils - - -class ConvBlock(nn.Layer): - def __init__(self, input_channels, output_channels, groups, name=None): - super(ConvBlock, self).__init__() - - self.groups = groups - self._conv_1 = Conv2D( - in_channels=input_channels, - out_channels=output_channels, - kernel_size=3, - stride=1, - padding=1, - weight_attr=ParamAttr(name=name + "1_weights"), - bias_attr=False) - if groups == 2 or groups == 3 or groups == 4: - self._conv_2 = Conv2D( - in_channels=output_channels, - out_channels=output_channels, - kernel_size=3, - stride=1, - padding=1, - weight_attr=ParamAttr(name=name + "2_weights"), - bias_attr=False) - if groups == 3 or groups == 4: - self._conv_3 = Conv2D( - in_channels=output_channels, - out_channels=output_channels, - kernel_size=3, - stride=1, - padding=1, - weight_attr=ParamAttr(name=name + "3_weights"), - bias_attr=False) - if groups == 4: - self._conv_4 = Conv2D( - in_channels=output_channels, - out_channels=output_channels, - kernel_size=3, - stride=1, - padding=1, - weight_attr=ParamAttr(name=name + "4_weights"), - bias_attr=False) - - self._pool = MaxPool2D( - kernel_size=2, stride=2, padding=0, return_mask=True) - - def forward(self, inputs): - x = self._conv_1(inputs) - x = F.relu(x) - if self.groups == 2 or self.groups == 3 or self.groups == 4: - x = self._conv_2(x) - x = F.relu(x) - if self.groups == 3 or self.groups == 4: - x = self._conv_3(x) - x = F.relu(x) - if self.groups == 4: - x = self._conv_4(x) - x = F.relu(x) - skip = x - x, max_indices = self._pool(x) - return x, max_indices, skip - - -class VGGNet(nn.Layer): - def __init__(self, input_channels=3, layers=11, pretrained=None): - super(VGGNet, self).__init__() - self.pretrained = pretrained - - self.layers = layers - self.vgg_configure = { - 11: [1, 1, 2, 2, 2], - 13: [2, 2, 2, 2, 
2], - 16: [2, 2, 3, 3, 3], - 19: [2, 2, 4, 4, 4] - } - assert self.layers in self.vgg_configure.keys(), \ - "supported layers are {} but input layer is {}".format( - self.vgg_configure.keys(), layers) - self.groups = self.vgg_configure[self.layers] - - # matting的第一层卷积输入为4通道,初始化是直接初始化为0 - self._conv_block_1 = ConvBlock( - input_channels, 64, self.groups[0], name="conv1_") - self._conv_block_2 = ConvBlock(64, 128, self.groups[1], name="conv2_") - self._conv_block_3 = ConvBlock(128, 256, self.groups[2], name="conv3_") - self._conv_block_4 = ConvBlock(256, 512, self.groups[3], name="conv4_") - self._conv_block_5 = ConvBlock(512, 512, self.groups[4], name="conv5_") - - # 这一层的初始化需要利用vgg fc6的参数转换后进行初始化,可以暂时不考虑初始化 - self._conv_6 = Conv2D( - 512, 512, kernel_size=3, padding=1, bias_attr=False) - - self.init_weight() - - def forward(self, inputs): - fea_list = [] - ids_list = [] - x, ids, skip = self._conv_block_1(inputs) - fea_list.append(skip) - ids_list.append(ids) - x, ids, skip = self._conv_block_2(x) - fea_list.append(skip) - ids_list.append(ids) - x, ids, skip = self._conv_block_3(x) - fea_list.append(skip) - ids_list.append(ids) - x, ids, skip = self._conv_block_4(x) - fea_list.append(skip) - ids_list.append(ids) - x, ids, skip = self._conv_block_5(x) - fea_list.append(skip) - ids_list.append(ids) - x = F.relu(self._conv_6(x)) - fea_list.append(x) - return fea_list - - def init_weight(self): - if self.pretrained is not None: - utils.load_pretrained_model(self, self.pretrained) - - -@manager.BACKBONES.add_component -def VGG11(**args): - model = VGGNet(layers=11, **args) - return model - - -@manager.BACKBONES.add_component -def VGG13(**args): - model = VGGNet(layers=13, **args) - return model - - -@manager.BACKBONES.add_component -def VGG16(**args): - model = VGGNet(layers=16, **args) - return model - - -@manager.BACKBONES.add_component -def VGG19(**args): - model = VGGNet(layers=19, **args) - return model diff --git a/spaces/FridaZuley/RVC_HFKawaii/lib/uvr5_pack/lib_v5/layers_33966KB.py b/spaces/FridaZuley/RVC_HFKawaii/lib/uvr5_pack/lib_v5/layers_33966KB.py deleted file mode 100644 index a38b7bb3ae3136b07eadfc2db445fef4c2de186b..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/lib/uvr5_pack/lib_v5/layers_33966KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/Froleptan/stablediffusion-infinity/css/w2ui.min.css b/spaces/Froleptan/stablediffusion-infinity/css/w2ui.min.css deleted file mode 100644 index 1e9a927d8c4521c622ab3de6cd6747daf306d378..0000000000000000000000000000000000000000 --- a/spaces/Froleptan/stablediffusion-infinity/css/w2ui.min.css +++ /dev/null @@ -1,2 
+0,0 @@ -/* w2ui 2.0.x (nightly) (10/10/2022, 1:43:34 PM) (c) http://w2ui.com, vitmalina@gmail.com */ -@font-face{font-family:w2ui-font;src:url("data:application/x-font-woff;charset=utf-8;base64,d09GRgABAAAAAAnsAAsAAAAADpwAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAABHU1VCAAABCAAAADsAAABUIIslek9TLzIAAAFEAAAAQQAAAFZdKW6PY21hcAAAAYgAAACdAAACJimbHahnbHlmAAACKAAABYQAAAd0bnTEjmhlYWQAAAesAAAAMQAAADYiTbc3aGhlYQAAB+AAAAAYAAAAJA3eCBFobXR4AAAH+AAAABAAAAA8cA4AAGxvY2EAAAgIAAAAIAAAACALUg1CbWF4cAAACCgAAAAfAAAAIAEfAGBuYW1lAAAISAAAAS4AAAIibo8QqHBvc3QAAAl4AAAAdAAAAJs0xq68eJxjYGRgYOBiMGCwY2BycfMJYeDLSSzJY5BiYGGAAJA8MpsxJzM9kYEDxgPKsYBpDiBmg4gCACY7BUgAeJxjYGSvYJzAwMrAwCrCIsXAwHAJQjNdYPBkbAbSDKzMDFhBQJprCoMDgyODH+sdILedbRWDGZBmBMkBAGbXCH0AAAB4nL3RWQ6DMAwE0AkJISzhJD1Cf+i+cg6+er7ejo6juUGrWnqWcBYRG0ANwNOGAuBecLBYWHWl7tGVesC27AmlPq8r81Sys5PMFfcG3hjRIKHluR4DMkYuRnwfrvKhjk1qu37I4w8u/HMMJb/1ZQ+YxDq6k4r2YpM5iPX4KDa1k1hnz9LQRRJdpaWbcJq4S08Psb97SqZZxg9PnRB8AAAAeJyFVFtMFFcYPv/MzmypZmHYnb0IO8vMylJghdmdvbBZLmLBSkE0QXAbRV9AwMSUWKHBJpJYo9HSiBaaErEP0gdtYxOV2ESgadMHqW2qMX3woQhp0trYWBPS1JbdPfafWSpVm/Rkzjnf/Ldz/tshDMHBf8sdIDnERgj4vLLCi4LNrsnBSFgIsbIg8xMvxZKHYlu3xkyHY1v3JmtMX3INJS3x5PX4tm1xUyye7DG9jmZAt2Vu4V4jHCFZjDkL+PHUzfSjK8w0+wuXSH0Ii5+m600eXYwzZL1cA8kiVuIm60hEP53PBt7uyAKr2QIOsBaVARfxlSPdzEJhEW7I9oA9EoWIDyxQBtUgAXy/pk7sEfk1zBH69cyo5LTADuFlSw2zDsqVCSUAVmiDM6ek2pz0H9mihCCbeTFHTN/JWV8penCyi6h8yFVnT99nPoH+Ost6Kc0zv73ncXpQm4VyOTXFnGl9NdsmpXjUbkQr7F9SbcouGfqe9YQ14vgu14TITFahL5rgfTL75+eZvIUFJn9+nmta+oqrysx/Ysbv5V4hXvyx2zBodhsGQSnC6CnoeSiahUsNRIIYFlz4hCDQOFzJzW0R1FyaoAncWgQBLtNmQWjJVQU4D9eQZvoxF/k7aacQQHIujNEuXScgwBicy9DoNrgkCJlc8A+5ueW7e0kl3kUWZYc+NdGLcS7yFWBKzHyOQwKHvSBaDdFIDihFOidk/AUNjs2QYt5gC1IL7zCW9OKpBw/YwtjNY0P01tCxm7F4XMegDh29FYuncwcP9F5UAwH1Yu8BugLZ8+/sGuzcxYw+UXhaOfXoWY0MRDdMhDy+YVb41SSbuEiQNKInoUgcgnY32PhSUHxhkMOYk7AsaKzyvE+GR2zGsRyDVGCwcwxR9qzfE/T4jQX8tLejA/wdsxOjJ+9W1tRU3j05ChtHhjN4eGTiyEDfZEDTApN9A/DCCjapyxb8nnS7boD2mpqqq1CbXhsZnquqqamaGx6BjWi1qnqp7Tl9+qeBdV8xbyP8x8RB1qKvFXrWQmWgWEB8uot9hUEJbBZQyiBUDYCuixleFWhZEHWAg++kieD2PduDNGHNz7eaZnFNH3f5XS6/VupylTJbku9P59uY07a8mfQ+ervtDoy0cXPRXVH8JHHplihJIqeK0p78soryvLzyirL8pb2mG2N2SbKPJe/AW+2fwdvtJPPefMSdwttb8b5RKAIWY22NghfYQgiDmU1cYLa0pi/f9mzecPgCLKUpn2abYInyXUx9K9Pcmr5E/Rs2e6C7SyemJtO4Ue78ci+NcX3Eh5ZtZt7Me2VhOeuCrPiqsBQ0WcjUgyjIuPAf8Pxqp0IrVKXNq9KYoqoKXFe9bYoKs4rKfG61iquctMKgz6rKdqRf96qql8ZU5cmbZ+YGSSH+KPqZZm8WyFg41RCUGJuF8YrBf5WWqf/cyRM72u2L0ACb6EPLpljD9Hczp5ubT890HT3YdzWoacGrfQe5Q40Ng4fP0uPw5uYTFQ06W5caQmb/AKwe6EdB/WiM5+Mk/zv3Aykm1YQUYquaI9FIOGQclqlvm1HAWjAaieLjig9mNePD1vXpnRvRG9eu9y3enP+Cns2rcNUqa/2l453dP/V0jJf6EXb07O/avbMRnE4oW1NXr1nsqe7ORGsgFAq0Jr5BgHXZmliAMqeTadq5u2t/d2dGEW0YcK1S66rIo+P5dotWXwf7tGW9FQNG6pbfoUGs5zyikSjZQlowquGQD1PIu/UnCesWk4d1K4e5sCbCf7Pk/+OxE8XuX93F9B64DMA2Jc9MMadK3PfdxalJtqkYQQm46D368zQtfVY2NUnvTXGyw11c7HZMO9wlJbjhoDQDlxkrlCmnTnFO4SDkby6j4OB4nGNgZGBgAOJJO18HxPPbfGXgZr0DFGG4O23bVwT9/xQHI9sqIJeDgQkkCgCOwA3cAAAAeJxjYGRgYL3DAAQcjFASTCMBfgAdRwEFeJxjYGBg4GAkDwMADvIAfwAAAAAAJgA8AKQAwAECAWgBaAHeAjgCZgKgAuADRgO6eJxjYGRgYOBnCGFgYwABJiDmAkIGhv9gPgMAEp0BgAB4nG2PQU7CQBSG/0LBCIkhmpi4m7hwYyjQBQsOAHsW7AtMoaR0mukA4QKewDN4Bk/g0jN4FP+Wly6UaTr53vf+N+0A6OELHsrl4bbay9XADasLN0k9YZ/8JNxCF8/Cbfq+cAevGAt38YCIJ3h+edo9nHADd3gTbtK/C/vkD+EWHvEp3Kb/Fu5ggR/hLl688Sk8JP3YZG6uN4c0snVdw0LbIjGZGgXD2s10pm3k9Fotz6o4bkLnYhVbs1dTdnWaGpVbs9MrF2ydyyeDQSw+WJk9TghxQMJbxzDIeLM5NDZ0KW9sr/T/mwUnLAq6slYYIcDwSm7GXFZlI1Yaa2aXOHMvcOQ3Q1rHtOJrObMnTWVW839SskJe9XY0K/oA22oqxwQDPvGffMAUT/oFXxtfYgAAeJxtxcsOwiAQBVBuC7Q+6S/idLSNlCEMTfTvNXHr2RzTmZ9g/gvo0MPCwWPAiAOOOOGMC64ImEx/k5ejhenpSZJUHb7tW1ZHVVTtXKU43kp72zXfxZWojX3hTGuyJe3q
KyeJs1eOlZZRubU1P9SYD+7xIE8=") format("woff");font-weight:400;font-style:normal}[class*=" w2ui-icon-"]:before,[class^=w2ui-icon-]:before{font-family:w2ui-font;display:block;vertical-align:middle;line-height:1;font-weight:400;font-style:normal;speak:none;text-decoration:inherit;text-transform:none;text-rendering:optimizeLegibility;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.w2ui-icon-box:before{content:"A"}.w2ui-icon-check:before{content:"B"}.w2ui-icon-colors:before{content:"C"}.w2ui-icon-columns:before{content:"D"}.w2ui-icon-cross:before{content:"E"}.w2ui-icon-drop:before{content:"F"}.w2ui-icon-empty:before{content:"G"}.w2ui-icon-info:before{content:"H"}.w2ui-icon-paste:before{content:"I"}.w2ui-icon-pencil:before{content:"J"}.w2ui-icon-plus:before{content:"K"}.w2ui-icon-reload:before{content:"L"}.w2ui-icon-search:before{content:"M"}.w2ui-icon-settings:before{content:"N"}@font-face{font-family:OpenSans;src:url("data:application/x-font-ttf;charset=utf-8;base64,AAEAAAARAQAABAAQR0RFRgt8DNQAAXd0AAAALkdQT1MAGQAMAAF3pAAAABBHU1VC47MpuAABd7QAAALuT1MvMqE2nskAAUdAAAAAYGNtYXCuu/X7AAFHoAAAA4hjdnQgD00YpAABU+gAAACiZnBnbX5hthEAAUsoAAAHtGdhc3AAFQAjAAF3ZAAAABBnbHlmdDiZSwAAARwAAS+0aGVhZAK6Y3AAAThIAAAANmhoZWENzAlzAAFHHAAAACRobXR46DU83QABOIAAAA6abG9jYSkU3PEAATDwAAAHVm1heHAFQwIKAAEw0AAAACBuYW1lW5KAHwABVIwAAAPScG9zdH+4CW8AAVhgAAAfA3ByZXBDt5akAAFS3AAAAQkAAgDBAAAECgW2AAMABwAVtwQDBQIEAwcAAC8yLzMBLzMvMzEwEyERITchESHBA0n8t2gCef2HBbb6SmgE5gACAJj/4wGJBbYAAwAOACtAFAMJCQIEBA8QAQEMAgwGT1kMFgIDAD8/KxESADkYLxESATkRMzMRMzEwASMDMwM0MzIWFRQGIyImAUZpM8/heDo/QDk0RAGTBCP6tIhGQkBHPwAAAgCFA6YCsAW2AAMABwAfQA0AAwcEAwQICQYCBwMDAD8zzTIREgE5OREzETMxMAEDIwMhAyMDAT8oaSkCKyloKQW2/fACEP3wAhAAAAIAMwAABPYFtgAbAB8AmUBVCB8cFQQUCREMDAkSDw4LBAoTExQWHR4HBAYXBAEAGQQYBQUGFAYKIQMaFwMYChggIQgEDA0MTlkcAQ0fABAREE5ZGRURTw0BTxEBDRENEQUXEwMKBQAvMz8zEjk5Ly9dXREzMysRADMzETMzKxEAMzMREgE5OREXMxESOTkRMxESFzkREhc5ETMREhc5MjIRMxESFzkxMAEDIRUhAyMTIQMjEyE1IRMhNSETMwMhEzMDIRUBIRMhA9VCARv+zVSJVP7RUohQ/voBH0T+6wErUotSATFUhlQBCPzlAS9C/tEDg/6sgf5SAa7+UgGugQFUfwG0/kwBtP5Mf/6sAVQAAwCD/4kEDAYSACAAJgAtAGZANScRJR0XBAQqFA0FIQAAGQURCQUuLyUNBg1NWQMGJA4qDkxZHSorHBQcTVkXKhQGFAYUBRYFAC8vEjk5Ly8SOTIrEQAzETMrEQAzETMrEQAzERIBFzkRMxEzMzMzETMzMxEzMTABFAYHFSM1IiYnNRYWMxEmJjU0Njc1MxUWFwcmJxEeAgc0JicRNgEUFhcRBgYEDMy3gXDSQ1PZWc2ly6eBuKs0lZqdnEqqWYDZ/d1ab2NmAcGIsRfo3yMfnCUvAbhBrIiDqBK2tAVFgzsL/k4yX3tlSFks/nseAwdMXCkBgxBdAAAFAGj/7AYtBcsACQAVACEALQAxAEVAJAAQBQoWKBwiIi4oCjAQBjIzAw0fKw0rDSswMQYwGBklGQcTBwA/Mz8zPz8SOTkvLxEzETMREgEXOREzETMRMxEzMTATFBYzMhEQIyIGBRQGIyImNTQ2MzIWARQWMzI2NTQmIyIGBRQGIyImNTQ2MzIWAQEjAfJKU6SkU0oBypmUjJuVkpGcAaZKVFRQUFRUSgHLmZSOmZWSjp/+/vzVkwMrBAKqqgFUAVKoquTp7t/j5u7826upp62rpaWr4+nu3uPm6wMg+koFtgAAAwBx/+wF0wXNAAsAFQA1AFFAMBMWAB0GIyorListIw4mGR0WCTY3MwxJWTMTDyctDjAFLwMZJgMqKiAvEiAJSlkgBAA/KwAYPxI5Lxc5Ehc5PysREgEXOREzETMRMxEzMTABFBYXNjY1NCYjIgYTMjcBDgIVFBYlNDY3LgI1NDYzMhYVFAYHATY2NzMCBwEjJwYGIyImAZ5IV4FlZ1ZZb5vxn/5Lb1wsm/65i7RVPSTEr6K6iJ0BlzhDF6hEiQEr5bl29JbX7QSTRX1YS39TTWFg+52aAahEWWZBdYn6gshmX2JqOZaop5VrtV3+eT6nY/7ilP7dsmpc1AAAAQCFA6YBPwW2AAMAFLcAAwMEBQIDAwA/zRESATkRMzEwAQMjAwE/KGkpBbb98AIQAAABAFL+vAIhBbYADQAcQAwHAAoEAAQODwsnAwMAPz8REgE5OREzETMxMBMQEjczBgIVFBIXIyYCUpuSopCRlIugk5oCMQEJAc6uwf4y9PD+Nr2qAcYAAAEAPf68AgwFtgANABxADAQKBwAKAA4PCgMEJwA/PxESATk5ETMRMzEwARACByM2EjU0AiczFhICDJuSoIuUkZCik5oCMf75/jqovAHL8PQBzsGv/jEAAQBWAn8EDgYUAA4AMEAbAwUEAQcNCgkLCQ8QBAoBDQIMDA0KBwQGCA4AAD/EMhc5ETMRMxEzERIBFzkxMAEDJRcFEwcDAycTJTcFAwKRKwGOGv6D+KywoLDy/ocdAYcrBhT+dW+2H/66XgFq/pZeAUYftm8BiwAAAQBoAOMEKQTDAAsAKEATAAQECQUFDA0DBwgHUFkADwgBCAAvXTMrEQAzERIBOREzMxEzMTAB
IRUhESMRITUhETMCjQGc/mSL/mYBmosDF4r+VgGqigGsAAEAP/74AW0A7gAIABG1BQAJCgUAAC/NERIBOTkxMCUXBgIHIzYSNwFeDxpiNX0bQQ3uF2T+93JoATJcAAEAVAHZAj8CcQADABG1AgAFBAABAC8zERIBOTkxMBM1IRVUAesB2ZiYAAEAmP/jAYkA8gALABhACwYAAAwNCQNPWQkWAD8rERIBOREzMTA3NDYzMhYVFAYjIiaYPTk6QUI5M0NqQ0VFQ0FGPwAAAQAUAAAC2wW2AAMAE7cCAAQFAwMCEgA/PxESATk5MTABASMBAtv936YCIQW2+koFtgACAGb/7AQtBc0ACwAXAChAFBIADAYABhkYCRVLWQkHAw9LWQMZAD8rABg/KxESATk5ETMRMzEwARACIyICERASMzISARASMzISERACIyICBC3v9uz27vTu9/zhlqSmlZWmpJYC3f6F/ooBfwFyAX4Bcv5+/pL+wf7dAScBOwE7ASX+3wABALwAAALLBbYACgAkQBAJAAEIAQsMBAkHBwEJBgEYAD8/EjkvEjkREgE5OREzMzEwISMRNDcGBgcnATMCy6IIFTTUWAGDjAQSgnQVLqxyASsAAQBkAAAEJQXLABkAK0AXGAEHEwATDgEEGhsQCktZEAcBGExZARgAPysAGD8rERIBFzkRMxEzMTAhITUBPgI1NCYjIgYHJzYzMhYVFAIHARUhBCX8PwGBsHA4jn5bo2RYyu7O6pzW/sAC8I8Bg7KYkFN1iTxPcajTsov+8ND+xwgAAAEAXv/sBBsFywAnAENAJBsAEwcHAAMWIg0GKCkDFxYXFktZFxcKJSUeS1klBwoRS1kKGQA/KwAYPysREgA5GC8rERIAORESARc5ETMRMzEwARQGBxUWFhUUBCEiJic1FhYzIBEQISM1MzI2NTQmIyIGByc2NjMyFgPunZCwqv7e/vV0wVtf12ABe/5ekJKryJN+YKptVFrrgtXsBF6Msh4IFrSS0eEjLJ4vMQEpAQqPl4ZrejRGcEdRwwAAAgArAAAEagW+AAoAEgA8QB4SBQkCAgsHAwADBQMTFAEFEgVMWQkPBxISAwcGAxgAPz8SOS8SOTMrEQAzERIBFzkRMzMzETMRMzEwASMRIxEhNQEzETMhETQ3IwYHAQRq2Z/9OQK2sNn+iAoIMCr+NwFQ/rABUJED3fwpAeaPtGA//XYAAQCF/+wEHQW2ABoAOkAfDwMZFAgUFwMEHBsAEUtZAAAGFRUYTFkVBgYMS1kGGQA/KwAYPysREgA5GC8rERIBFzkRMxEzMTABMgQVFAAjIic1FhYzMjY1ECEiBycTIRUhAzYCLecBCf7f/veCRtBlsMP+iV+fVjcC1/23JXMDfeXH4/7+T6AtM6adATIdNwKsmf5JFwAAAgB1/+wELwXLABYAJABEQCMaEQshIQAABhEDJiUMCw4dTVkLDg4UAxQXS1kUGQMITVkDBwA/KwAYPysREgA5GC85KxEAMxESARc5ETMRMxEzMTATEAAhMhcVJiMiAgMzNjMyFhUUAiMiAAUyNjU0JiMiBgYVFBYWdQFPAUhxQU1j6/gMDG7uxeP51OP+9gHrjp2SkVqWWVCTAnEBrwGrE48Z/tv+xqzuzOT++wFVyLOpkaZKgkZnsmgAAQBeAAAEKwW2AAYAH0AQAQUFAAIDBwgDAkxZAwYAGAA/PysREgEXOREzMTAhASE1IRUBAR0CXvzjA839qgUdmYX6zwADAGj/7AQpBcsAFgAiAC4ATUApFw8mFCwDHQkJAwYRFA8GLzAGESkgKSBLWSkpDAAMGk1ZDBkAI01ZAAcAPysAGD8rERIAORgvKxESADk5ERIBFzkRMxEzETMRMzEwATIWFRQGBxYWFRQGIyImNTQlJiY1NDYDFBYzMjY1NCYnBgYBIgYVFBYXNjY1NCYCSMjqhpOylv7d6vwBMop463enl5WmnMKVhgE6fY52n493kQXLuqRssklVu3u22c28+4xOtXCfvfumeIaMemGXR0CbA2d4ZFyEQjyKXGV3AAACAGr/7AQlBcsAFwAlAEFAIhsRIgoKAAAEEQMmJw4eTVkLFA4OAhQUGEtZFAcCB01ZAhkAPysAGD8rERIAORgvEjkrERIBFzkRMxEzETMxMAEQISInNRYzMhITIwYGIyImNTQSMzIWEgEiBhUUFjMyNjY1NCYmBCX9aHREUGbw9QsMN7ZywuT/0JXfeP4Uj5yQk1uZWFKTA0b8phSPGgEpATNTV+jQ5AEImf7bATC4pJClSoBGabJmAAACAJj/4wGJBGQACwAVAChAFBAGBgwAABYXDhNPWQ4QCQNPWQkWAD8rABg/KxESATkRMzMRMzEwNzQ2MzIWFRQGIyImETQzMhUUBiMiJpg9OTpBQjkzQ3Z7QjkzQ2pDRUVDQUY/A7uHh0FGPwACAD/++AGFBGQACAASACJAEAENDQUJCRQTCxBPWQsQBQAAL80/KxESATkRMzMRMzEwJRcGAgcjNhI3AzQzMhUUBiMiJgFeDxpiNX0bQQ0Vd3tCOTo97hdk/vdyaAEyXALvh4dBRkYAAAEAaADyBCkE2QAGABVACQQABQEEBwgDAAAvLxESARc5MTAlATUBFQEBBCn8PwPB/PIDDvIBpmIB35X+jf64AAACAHcBwQQZA+MAAwAHACpAFQcCBAACAAkIBAVQWQQBAFBZDwEBAQAvXSsAGC8rERIBOTkRMxEzMTATNSEVATUhFXcDovxeA6IDWomJ/meJiQAAAQBoAPIEKQTZAAYAFUAJBQECAAQHCAYDAC8vERIBFzkxMBMBATUBFQFoAw/88QPB/D8BiQFGAXWV/iFi/loAAAIAG//jAzkFywAbACYAOUAdIRwbAAcTEwAcDgQnKAAAJBAkHk9ZJBYQCklZEAQAPysAGD8rERIAORgvERIBFzkRMxEzETMxMAE1NDY3NjY1NCYjIgYHJzYzMhYVFAYGBwYGFRUDNDMyFhUUBiMiJgEhSGKIR4N7T5ZhO73Ov9QnTH5lQbJ4Oj9AOTREAZM2dZdUc3RSZm8lMYdjvKtJb2NuVnJfIf7XiEZCQEc/AAIAef9GBrgFtAA1AD8ARUAiIy42DjsHFBsAACkUDi4FQEEYODgEPQgRCxELESsfMgMmKwAvMz8zEjk5Ly8SOTIzMxEzERIBFzkRMxEzMxEzETMxMAEUBgYjIiYnIwYGIyImNTQ2MzIWFwMVFDMyNjU0AiQjIgQCFRAAITI3FQYjIAAREBIkITIEEgEUMzITEyYjIgYGuFigaFZ2CwgolWaWqezARKxFGYVbcpT+77Hf/rauAUIBL9LiwPT+lf5v1gGMAQDXAU+3+/bDzxIOSFWCkwLZjuyCaFFXYs2wzP8ZFv4qFrLXrLUBEJO5/qnh/s/+uFaFVAGPAWYBBAGW37X+s/6k/gE5AQUUtAACAAAAAAUQBbwABwAOADlAHgIOCwgBBQADAAcDBAcEEA8OAklZCwUODgQFAwAEEgA/Mz8SOS8SOSsREgE5OREzETMREhc5MTA
hAyEDIwEzAQEDJicGBwMEYLb9trSsAkKPAj/+ZaohIxYprAHR/i8FvPpEAmoBxVZ9YHP+OwADAMkAAAS+BbYADgAXACAASUAmEwQdCg8ZGQ4KBAcOBCEiCA8YDxhKWQ8PDgAOGUpZDhIAF0pZAAMAPysAGD8rERIAORgvKxESADkREgEXOREzETMRMxEzMTATISAEFRQGBxUEERQEIyETITI2NTQmIyMRESEyNjU0JiPJAZ0BIwEEkYsBTf737v4CqgEYtJ6wwPoBMbGzt7sFtq68gqkZCjn+28TcA0Rxhntt/ZH93YmSiIAAAAEAff/sBM8FywAWACZAFAMOFAkOAxcYEgBJWRIECwZJWQsTAD8rABg/KxESARc5ETMxMAEiABEQADMyNxUGIyAAETQSJDMyFwcmAzvx/ukBDfmZxJjf/r3+oakBP9jmrEimBTP+v/7p/uH+xzeVOQGIAWniAVS4VJJOAAACAMkAAAVYBbYACAARAChAFA4ECQAEABITBQ1KWQUDBA5KWQQSAD8rABg/KxESATk5ETMRMzEwARAAISERISAAAxAAISMRMyAABVj+d/6P/msBwAFVAXq0/uH+5ffPATABMgLp/pb+gQW2/ob+pwEeASL7cAErAAABAMkAAAP4BbYACwA6QB8GCgoBBAAIAQQMDQYJSVkGBgECAgVJWQIDAQpJWQESAD8rABg/KxESADkYLysREgEXOREzETMxMCEhESEVIREhFSERIQP4/NEDL/17Al79ogKFBbaX/imW/eYAAQDJAAAD+AW2AAkAMkAaBgAAAQMIAQMKCwYJSVkGBgECAgVJWQIDARIAPz8rERIAORgvKxESARc5ETMRMzEwISMRIRUhESEVIQFzqgMv/XsCXv2iBbaX/emXAAABAH3/7AU9BcsAGwA6QB8UCBkCAg4bCAQcHQAbSVkAAAUMDBFJWQwEBRdJWQUTAD8rABg/KxESADkYLysREgEXOREzETMxMAEhEQYGIyAAETQSJDMyFwcmIyAAERAAITI3ESEDTAHxdPCe/rT+jrcBWOfqykLGt/71/tQBIQEYmJH+uQL+/TklJgGLAWTkAVe1VpZU/sL+5v7Y/s4jAcIAAQDJAAAFHwW2AAsAM0AZCQEBAAgEBAUABQ0MCANJWQgIBQoGAwEFEgA/Mz8zEjkvKxESATk5ETMRMxEzETMxMCEjESERIxEzESERMwUfqvz+qqoDAqoCsP1QBbb9kgJuAAABAFQAAAJWBbYACwA3QBwFAQoDCAAAAwEDDA0JBAYESlkGAwoDAQNKWQESAD8rEQAzGD8rEQAzERIBFzkRMxEzETMxMCEhNTcRJzUhFQcRFwJW/f6srAICrKxiIwSqJWJiJftWIwAB/2D+fwFoBbYADQAdQA0LCAgODwkDAAVJWQAiAD8rABg/ERIBOREzMTADIic1FjMyNjURMxEUBgxeNkdNY2eqwP5/G5EUeHEFtvpYvtEAAAEAyQAABOkFtgALACpAFQgEBAUFAgsKAAUNDAIIBQkGAwEFEgA/Mz8zEjk5ERIBFzkRMxEzMTAhIwEHESMRMxEBMwEE6cj965mqqgKXyf20AsWI/cMFtv0rAtX9hQABAMkAAAP4BbYABQAfQA4DAAAEBgcBAwADSVkAEgA/KwAYPxESATk5ETMxMDMRMxEhFcmqAoUFtvrkmgABAMkAAAZxBbYAEwAyQBgIBQUGCw4ODQYNFBUBChEDBgsHAw4ABhIAPzMzPzMSFzkREgE5OREzETMRMxEzMTAhASMWFREjESEBMwEzESMRNDcjAQNQ/hAIDp0BAAHPCAHT/qoOCP4MBRCa1PxeBbb7SgS2+koDrqK++vIAAQDJAAAFPwW2ABAALkAVCQYGBwEPDwAHABESCwMHDwgDAQcSAD8zPzMSOTkREgE5OREzETMRMxEzMTAhIwEjFhURIxEzATMmAjcRMwU/wvzhCBCdwAMdCAIOAp8Ey9i0/MEFtvs6GwElPwNHAAACAH3/7AW+Bc0ACwAXAChAFBIADAYABhkYCRVJWQkEAw9JWQMTAD8rABg/KxESATk5ETMRMzEwARAAISAAERAAISAAARASMzISERACIyICBb7+nf7E/r3+oQFgAUQBOwFi+3P98fP49/Lz/QLd/qH+bgGLAWgBZQGJ/nD+oP7X/s0BMgEqAScBMf7NAAIAyQAABGgFtgAJABIANEAaCgUFBg4ABgATFAoESlkKCgYHBxJKWQcDBhIAPz8rERIAORgvKxESATk5ETMRMxEzMTABFAQhIxEjESEgATMyNjU0JiMjBGj+0f7mrKoBewIk/QuZ4sq+yb4EDN7v/cEFtv0bkqGRjgAAAgB9/qQFvgXNAA8AGwA0QBsQChYAAAQDCgQcHQMNBw0ZSVkNBAcTSVkFBxMAP8YrABg/KxESADkREgEXOREzETMxMAEQAgcBIwEHIAAREAAhIAABEBIzMhIREAIjIgIFvuLOAVz3/uM3/r3+oQFgAUQBOwFi+3P98fP49/Lz/QLd/uf+jEL+lgFKAgGLAWgBZQGJ/nD+oP7X/s0BMgEqAScBMf7NAAIAyQAABM8FtgAMABUASEAlDQEBAgwJEQcLCgoHCQIEFhcJDQANAEpZDQ0CAwMVSVkDAwsCEgA/Mz8rERIAORgvKxESADkREgEXOREzETMRMxEzETMxMAERIxEhIAQVEAUBIwElMzI2NTQmIyMBc6oBkQENAQH+2gGNyf6e/s/ptKirvd0CYP2gBbbOz/7eZv1vAmCSj4+RgAABAGr/7AQCBcsAJAA0QBseEwwAABgTBQQlJgweAxYWG0lZFgQDCUlZAxMAPysAGD8rERIAOTkREgEXOREzETMxMAEUBCMgJzUWFjMyNjU0JiYnJiY1NDYzMhcHJiMiBhUUFhYXFhYEAv7o8P78jFrUaKqsPY+SzK/+0dq3NbWrh5g4hYnmrQGFwdhDpCYsgXNMYVI0ScihqchQlEx0Z0xhUTFSvAAAAQASAAAEWgW2AAcAJEASAAEFAQMDCAkHAwQDSVkEAwESAD8/KxEAMxESARc5ETMxMCEjESE1IRUhAouq/jEESP4xBR+XlwAAAQC6/+wFGQW2ABEAJUAREAEKBwEHExIRCAMEDUlZBBMAPysAGD8zERIBOTkRMxEzMTABERQAISAANREzERQWMzI2NREFGf7S/vj++P7fqsjCucgFtvxO+v7iASD8A678RrfExbgDuAABAAAAAATDBbYACgAaQAsBBAwLCAMABAMDEgA/PzMSORESATk5MTABMwEjATMBFhc2NwQMt/3xqP30tAFQOiIkOgW2+koFtvxOo5qioQABABsAAAdMBbYAGQAkQBAZChsaFQ4OBQkYEQoDAQkSAD8zPzMzEjk5ETMREgE5OTEwISMBJiYnBgcBIwEzExYXNjcBMwEWFzY3EzMFxaj+2RU0ARYw/uKo/nu05zAWGzUBBrQBEzAhEzXmtAPTQcYUhJ38MwW2/Hm+mrevA3n8f5vDjswDhQAAAQAIAAAElgW2AAsAI0ASBAYFCwoABg
0MAggECQYDAQQSAD8zPzMSOTkREgEXOTEwISMBASMBATMBATMBBJbB/nf+cLQB5v47vAFrAW61/jsCg/19AvwCuv29AkP9TAAAAQAAAAAEewW2AAgAIEAPBAUCBQcDCQoABQEHAwUSAD8/MxI5ERIBFzkRMzEwAQEzAREjEQEzAj0Bhrj+GKz+GboC2wLb/IH9yQIvA4cAAQBSAAAEPwW2AAkAK0AXCAEDBwAHBAEECgsFBElZBQMBCElZARIAPysAGD8rERIBFzkRMxEzMTAhITUBITUhFQEhBD/8EwMI/RADv/z4Ax6FBJiZhftpAAEApv68Am8FtgAHACBADgYBBAABAAgJBQIDBgEnAD8zPzMREgE5OREzETMxMAEhESEVIREhAm/+NwHJ/t8BIf68BvqN+iEAAAEAFwAAAt0FtgADABO3AwEEBQMDAhIAPz8REgE5OTEwEwEjAboCI6b94AW2+koFtgAAAQAz/rwB/AW2AAcAIEAOAwABBgAGCAkABycDBAMAPzM/MxESATk5ETMRMzEwFyERITUhESEzASH+3wHJ/je2Bd+N+QYAAAEAMQInBCMFwQAGABhACQADBwgFAgAEAgAvLzMSORESATk5MTATATMBIwEBMQGyYwHdmP6M/rICJwOa/GYC6f0XAAH//P7FA5r/SAADABG1AAUBBAECAC8zEQEzETMxMAEhNSEDmvxiA57+xYMAAQGJBNkDEgYhAAkAE7YABAsKBoABAC8azRESATk5MTABIyYmJzUzFhYXAxJuQbIoyyByLATZNMA/FUW1NQACAF7/7APNBFoAGQAkAEdAJSIICx4eGRkSCAMlJgECCx5HWQILCwAVFQ9GWRUQBRpGWQUWABUAPz8rABg/KxESADkYLzkrEQAzERIBFzkRMxEzETMxMCEnIwYGIyImNRAlNzU0JiMiByc2NjMyFhURJTI2NTUHBgYVFBYDUiEIUqN6o7kCE7pveomtM1HBYcS9/g6bsabGr22cZ0momwFMEAZEgXtUfywyrsD9FHWqmWMHB21zWl4AAgCw/+wEdQYUABMAHwBEQCIKFxcPDwwdAwwDICENAAwVEhEKEQYABhpGWQYWABRGWQAQAD8rABg/KxESADk5ETMYPz8REgE5OREzETMRMxEzMTABMhIREAIjIiYnIwcjETMRFAczNhciBhUUFjMyNjU0JgKu2O/x1muxPAwjd6YICHTMqpaaqpmWlgRa/tn+8v7y/tVPUo0GFP6Gf2Wki8Pn58ff0dbSAAABAHP/7AOLBFwAFgAmQBQPAwMVCQMYFwYNRlkGEAASRlkAFgA/KwAYPysREgEXOREzMTAFIgAREAAzMhYXByYmIyARFBYzMjcVBgJm7v77AQn1T54tMzeCMv6yo6CJkG4UASUBDAETASwiF40WHf5Wytg7kzkAAgBz/+wENwYUABIAHwBCQCEdBhcADg4RBhEgIRIVDwAAAQEMAwkJGkZZCRADE0ZZAxYAPysAGD8rERIAOTkRMxg/PxESATk5ETMRMzMRMzEwJSMGIyICERASMzIXMycnETMRIyUyNjU1NCYjIgYVFBYDmglz5dfv8Nbfdw0HBKaH/p6qmZuqkpuak6cBJgEPAQ8BLKJPTQG++ex3uc4j6cfjz9LWAAIAc//sBBIEXAATABoAO0AfGAoXCwMDEQoDHBsXC0ZZFxcABgYURlkGEAAORlkAFgA/KwAYPysREgA5GC8rERIBFzkRMzMRMzEwBSIAERAAMzISFRUhFhYzMjcVBgYDIgYHITQmAn/z/ucBBdzO8P0NBbmosa1YnZyEnQ4CPYwUASgBBwEJATj+8d5pwchKlCYhA+WsmJ2nAAABAB0AAAMOBh8AFAA5QB0UDAwTAgIHAwUDFRYKD0ZZCgABBQcFRlkTBw8DFQA/PzMrEQAzGD8rERIBOTkRMzMRMzMSOTEwASERIxEjNTc1ECEyFwcmIyIGFRUhAp7+6abExAFhV3UrYEReWgEXA8f8OQPHSzw9AZQjhR99ikcAAAMAJ/4UBDEEXAAqADcAQQBuQD4rGTglDB89BTETARMFAioiHB8lGQpCQxwPNQ81RlkIO0dZCiIIKg8IDwgWKioCR1kqDyg/R1koEBYuR1kWGwA/KwAYPysAGD8rERIAOTkYLy8REjk5KysREgA5ERIBFzkRMxEzETMRMxEzMTABFQcWFhUUBiMiJwYVFBYzMzIWFRQEISImNTQ2NyYmNTQ2NyYmNTQ2MzIXARQWMzI2NTQmIyMiBhMUFjMyNTQjIgYEMcscLNzAMStqSlrCsr/+3P7o1+mAdCo5QEVVa9jGVkX+EZaM0clumMdxflqCdPP2dX4ESGkYI3FHocAIOFUtK5aPtr+gkmSSGhNQNTxaKiOobLTDFPsAWVx9a1lFbAM8c3bs934AAQCwAAAERAYUABYAM0AZDgwICAkAFgkWFxgOCRISBEZZEhAKAAAJFQA/Mz8/KxESADkREgE5OREzETMRMzMxMCERNCYjIgYVESMRMxEUBzM2NjMyFhURA556gq2fpqYICjG1dMnJAsWGhLzW/cMGFP4pVThPW7/Q/TUAAAIAogAAAWYF3wADAA8AI0ARCgAABAEBEBENB0hZDQIPARUAPz/OKxESATkRMzMRMzEwISMRMwM0NjMyFhUUBiMiJgFWpqa0OCooOjooKjgESAEpOTU2ODg3NwAAAv+R/hQBZgXfAAwAGAAsQBYTCwsNCAgZGhYQSFkWQAkPAAVGWQAbAD8rABg/Gs4rERIBOREzMxEzMTATIic1FjMyNjURMxEQAzQ2MzIWFRQGIyImK187RUNOSaa0OCooOjooKjj+FBmHFFVXBPz7EP68B105NTY4ODc3AAEAsAAABB0GFAAQADZAGxAOCgoLCwgGBAUIBBESDAAAEBAICAMHCxUDDwA/PzMSOS85ETM/ERIBFzkROREzETMzMTABNjcBMwEBIwEHESMRMxEUBwFUK1gBYsX+RAHbyf59faSkCAIxPWMBd/4t/YsCBmz+ZgYU/Mc3cwABALAAAAFWBhQAAwAWQAkAAQEEBQIAARUAPz8REgE5ETMxMCEjETMBVqamBhQAAQCwAAAGywRcACMARkAjFREREggJACMJEiMDJCUcFhUVEhkEDRkNRlkfGRATDwkAEhUAPzMzPz8zKxEAMxESORgvMzMREgEXOREzETMRMxEzMTAhETQmIyIGFREjETQmIyIGFREjETMXMzY2MyAXMzY2MzIWFREGJXB2m5SmcHeckaaHGwgvq2oBAU8IMbp3urkCyYODsrn9nALJg4O71f3BBEiWUFq6VmS/0v01AAABALAAAAREBFwAFAAxQBgAFAwICAkUCRYVDAkQEARGWRAQCg8ACRUAPzM/PysREgA5ERIBOTkRMxEzETMxMCERNCYjIgYVESMRMxczNjYzMhYVEQOeeoKsoKaHGwgzuHHGyALFhoS61v3BBEiWUVm/0v01AAIAc//sBGIEXAAMABgAKEAUEwANBwAHGhkKFkZZChADEEZZAxYAPysAGD8rE
RIBOTkRMxEzMTABEAAjIiYCNRAAMzIAARQWMzI2NTQmIyIGBGL+8u6T5HwBDO7mAQ/8vaijo6mppaOmAiX+9P7TigECrQEMASv+zv770tzb09HZ1gACALD+FAR1BFwAFAAhAD9AIBkLBAcHCB8SCBIiIwQLAA8PFUZZDxAJDwgbABxGWQAWAD8rABg/Pz8rERIAOTkREgE5OREzETMRMzMzMTAFIiYnIxYVESMRMxczNjYzMhIREAIDIgYHFRQWMzI2NTQmAq5rsTwMDKaHFwhAqm7a7fHuqJYCmqqOoaEUT1JgVv49BjSWWlD+1v7z/vL+1QPjussl58fmys3bAAIAc/4UBDcEXAAMAB8AREAiChAdFgMaGhkQGSAhGhsXDx0eHhYNExMHRlkTEA0ARlkNFgA/KwAYPysREgA5OREzGD8/ERIBOTkRMxEzMzMRMzEwJTI2NzU0JiMiBhUUFhciAhEQEjMyFzM3MxEjETQ3IwYCTqaYBZypkpuZfdTu8NbheQkYg6YLDXN3stMl5srjz8/ZiwEqAQsBDQEuqpb5zAHVZEanAAEAsAAAAycEXAAQACpAFA0JCQoKAhESCw8NAAoVAAVGWQAQAD8rABg/Ejk/ERIBOTkRMxEzMTABMhcHJiMiBhURIxEzFzM2NgKkSToXRDSFvaaJEwg9rARcDJoP2KH9tARIy2t0AAEAav/sA3MEXAAkADZAHB4TDAAAGAUTBCUmDB4DFhYbRlkWEAYDCUZZAxYAPysAGC8/KxESADk5ERIBFzkRMxEzMTABFAYjIic1FhYzMjY1NCYnLgI1NDYzMhcHJiMiBhUUFhYXFhYDc+TO2npPtVSCjG+hmYE/2r6xqTulhnZ4LWSOw4kBK5mmRZooLlNVQFs+OVVsS4abSIdESkEsPjg1R5AAAQAf/+wCqAVGABYANEAbEBQUCQsJEgMEGBcKExATR1kOQBAPBwBGWQcWAD8rABg/Gs0rEQAzERIBFzkRMxEzMTAlMjY3FQYGIyARESM1NzczFSEVIREUFgISLFIYG2kq/sKdnUZgAT7+wl51DQd/DREBTwKMUEXq/oH9e2NqAAABAKT/7AQ5BEgAFAA0QBkBEwcMDAoTChUWDA0NEAgUDxAERlkQFgsVAD8/KwAYPzMSOREzERIBOTkRMxEzETMxMAERFBYzMjY1ETMRIycjBgYjIiY1EQFMeoKsn6aJGAkztXTIxwRI/TmGhLzVAkD7uJNRVr7RAs0AAAEAAAAABAIESAALABhACgEKDA0FCQEPABUAPz8zORESATk5MTAhATMTFhczNhITMwEBoP5gsuxQDggLdcyy/mAESP125EQ1AU0CMPu4AAEAFwAABiMESAAcACxAFAkbHR4XFg4NAwQNBAgaEgkPAAgVAD8zPzMzEjk5ETMRMzMzERIBOTkxMCEDJicjBgcDIwEzEhIXMzY2NxMzExYXMzY2EzMBBC/JEzQIKB7PwP7VrmpvCAgLMRLJtMQ4FAgEI7+s/tECgzvRr1/9fwRI/mP+UEs5tTUCdf2LrHUklgLc+7gAAAEAJwAABAgESAALACJAEQcFBgABBQwNCQMBCAsVBAEPAD8zPzMSOTkREgEXOTEwAQEzAQEzAQEjAQEjAbj+g70BIQEgu/6DAZG8/s3+yrwCMQIX/lwBpP3p/c8BvP5EAAEAAv4UBAYESAAVACRAEgkPAAMWFwQNAA0SRlkNGwgADwA/Mj8rERIAORESARc5MTATMxMWFzM2NhMzAQYGIyInNRYzMjc3ArLwTxMIDVPmsv4pRruITEo3RKtJPQRI/Y/WXzP3Anz7ILmbEYUMwJwAAAEAUgAAA20ESAAJACtAFwgBAwcABwQBBAoLBQRHWQUPAQhHWQEVAD8rABg/KxESARc5ETMRMzEwISE1ASE1IRUBIQNt/OUCVv3PAuf9sgJdcQNWgYH8ugABAD3+vALBBbYAHAAsQBUZGhoLFwAADwcUAwMHCwMdHhMDBCcAPz8REgEXOREzETMzETMRMxEzMTAlFBYXFSYmNRE0JiM1NjY1ETQ2MxUGFREUBxUWFQHbdXG+0H54gnTYtubf3wxmXAKMAqqaAS9oWY0CXGABMpusiwbB/tnXJwwn1wABAe7+EAJ7BhQAAwAWQAkCAwMEBQMbAAAAPz8REgE5ETMxMAEzESMB7o2NBhT3/AABAEj+vALLBbYAHQAsQBUVBQoSEgIZAB0dDg4ZBQMeHxUnBgMAPz8REgEXOREzETMRMzMRMxEzMTABJjURNCc1MhYVERQWFxUiBhURFAYHNTY2NRE0NjcCCt/juNN2gnp+zb5vdG5xAj8n1wEnwQaLrpn+zmFbAo1ZaP7RmasCjAJcZgEpcngUAAABAGgCUAQpA1QAFwAkQBEDDxgZEgxQWQMSDwYGAFBZBgAvKwAQGMQvxCsREgE5OTEwASIGBzU2MzIWFxYWMzI2NxUGIyImJyYmAVI1fzZkkERxWUJiLzaANmaOSH5IS1oCyUM2l20cJhwbQDmWbiEgIBgAAAIAmP6LAYkEXgADAA4AK0AUAgQEAwkJDxAAAAMMDAZPWQwQAyIAPz8rERIAORgvERIBOREzMxEzMTATMxMjExQjIiY1NDYzMhbbaTPP4Xk8PD85M0YCrPvfBUyHR0A/SEAAAQC+/+wD2wXLABsAPkAeFggNAwMKBAAQEAQIAxwdGQUCEwoNAg0CDQQLBwQZAD8/Ejk5Ly8RMzMRMzMREgEXOREzETMzETMRMzEwJQYHFSM1JgI1ECU1MxUWFhcHJiMiBhUUFjMyNwPLaZOFy8EBjIdLjjExhW2sop+njY7wNgbIziABEfoB/D6spAMhF4wz09nUyzsAAQA/AAAERAXJAB0ASEAmGBMJDQ0aFhECCxYTBR4fDBgZGE5ZCRkZEwATEExZExgABUtZAAcAPysAGD8rERIAORgvMysRADMREgEXOREzMxEzETMxMAEyFwcmIyIGFREhFSEVFAYHIRUhNTY1NSM1MxE0NgKqvqo9mo97fQGm/lpBSgMb+/vNxsbgBclUhU18jP7Zf91kiCyajS/0338BPLLNAAACAHsBBgQXBKAAGwAnACBADRwAIg4ADigpHxUVJQcALzMzLzMREgE5OREzETMxMBM0Nyc3FzYzMhc3FwcWFRQHFwcnBiMiJwcnNyY3FBYzMjY1NCYjIga4Sodeh2iCf2aJX4ZKSoNciWZ/hmSHXIVKgZ10dJ6gcnSdAtN6a4xchUlJhVyKcXaDZ4dchUdJhVyIa3xwoJ9xcqKkAAABAB8AAARxBbYAFgBWQC4SDgcLCxAMBQkCCQMMFA4VBxcYCg4OBw8GEhIDABMVDxMfEwIPEw8TDAEVBgwYAD8/MxI5OS8vXRESOTIyETMRMzMRMxESARc5ETMRMzMRMxEzMTABATMBIRUhFSEVIREjESE1ITUhNSEBMwJIAXuu/mABBv7DAT3+w6T+xAE8/sQBAP5lsgLfAtf8/n+qf/70AQx/qn8DAgACAe7+EAJ7BhQAAwAHACRAEAIGBgMHBwgJBAMEAwcb
AAAAPz85OS8vERIBOREzMxEzMTABMxEjETMRIwHujY2NjQYU/Pj+Dfz3AAIAe//4A5YGHQAxAD0AQ0AmMgATBioeOBkZHgwGACMGPj8VAzs2HC0GIQkhJ0dZIRUJEEdZCQAAPysAGD8rERIAFzkREgEXOREzETMRMxEzMTATNDY3JiY1NDYzMhYXByYmIyIGFRQWFxYWFRQGBxYVFAYjIic1FhYzMjY1NCYmJy4CNxQWFxc2NTQmJwYGi1ZOSlTPxV6fYTVih0x0dHuaupZSSpnq1NqATsJSho0wbHOOhkKShKcxiZO5RFUDKVaJJShvVXmLHSeDJxs7QDxUN0SXa1qNKVGSjJlBlCUtTEcuOjorNFpyYk1pPRNQb1NwORNkAAIBNQUOA2gF0wALABcAHkAMBgAMEgASGBkPAxUJAC8zzTIREgE5OREzETMxMAE0NjMyFhUUBiMiJiU0NjMyFhUUBiMiJgE1NSUmNzcmJTUBfTUlJTc3JSU1BXE0Li40MjExMjQuLjQyMTEAAAMAZP/sBkQFywAWACYANgBGQCcnFwMPLx8fFAkPFwU3OAYMABIPDB8MAgASEBICDBIMEhsrIxMzGwQAPzM/MxI5OS8vXV0RMxEzERIBFzkRMxEzETMxMAEiBhUUFjMyNxUGBiMiJjU0NjMyFwcmATQSJDMyBBIVFAIEIyIkAjcUEgQzMiQSNTQCJCMiBAIDfX2Hf4NWfTBlRsLQ3b+Adjps/JfIAV7KyAFeysL+otDP/qLDaa4BLayuASqvrv7XsK7+1q8EI66aqKItfBQc8djR9jx2M/64yAFeysj+osrF/qbQzwFaxq3+062uASmwrgEqr67+1wAAAgBGAxQCcQXHABYAHwA3QBwXBhsKAQEWFhAGAyAhHAoKEhkWAAMQAwIDDRIfAD8z1F3EMxI5LzMREgEXOREzETMzETMxMAEnBiMiJjU0Njc3NTQjIgcnNjMyFhURJRQzMjU1BwYGAhQYXIxfb5qldZRkaCtyhYKJ/lBwyWJwZwMhVGFjZmZpBgQnhTNgOGl5/jy8ZLQxBAQ5AAIAUgB1A6oDvgAGAA0AKUATAwYKDQIECwkJBA0GBA4PDAUIAQAvMy8zERIBFzkRMxEzETMRMzEwEwEXAQEHASUBFwEBBwFSAVZ3/t8BIXf+qgGLAVh1/uEBH3X+qAInAZdF/qL+oUcBlxsBl0X+ov6hRwGXAAABAGgBCAQpAxcABQAbQAwCAQQBBgcFBFBZBQIALy8rERIBOTkRMzEwAREjESE1BCmJ/MgDF/3xAYWKAP//AFQB2QI/AnECBgAQAAAABABk/+wGRAXLAAgAFgAmADYAXUAzJxcAERESBAkvHx8NCQwSFwY3OAwQEAAADhMOEggTDxIfEgIAExATAhITEhMbKyMTMxsEAD8zPzMSOTkvL11dETMRMxESOS8zETMREgEXOREzETMRMxEzETMxMAEzMjY1NCYjIwUUBgcTIwMjESMRITIWATQSJDMyBBIVFAIEIyIkAjcUEgQzMiQSNTQCJCMiBAIC02xQYVZdagGyVU3uqM+HlAEFppv738gBXsrIAV7Kwv6i0M/+osNprgEtrK4BKq+u/tewrv7WrwL6U0BLQYhQex7+dQFi/p4De4L+xcgBXsrI/qLKxf6m0M8BWsat/tOtrgEpsK4BKq+u/tcAAf/6BhQEBgaTAAMAEbUABQEEAQIALzMRATMRMzEwASE1IQQG+/QEDAYUfwACAH8DXALuBcsADAAYACFADg0AEwYABhkaEArAFgMEAD8zGswyERIBOTkRMxEzMTATNDYzMhYVFAYGIyImNxQWMzI2NTQmIyIGf7WCgrZSklSCtXN1UVBzcVJTcwSTgra1g1SPVLSDUnJxU1RxcgD//wBoAAEEKQTDAiYADgAAAAcCKwAA/XQAAQAxAkoCjQXJABgAI0ARBxMXAQEOEwAEGhkKEB8XASAAPzM/MxESARc5ETMRMzEwASE1Nz4CNTQmIyIGByc2MzIWFRQGBwchAo39pOxZUiFQPzRiRUKDmISTWZOuAbgCSmjmVmFMNkRFJjJYb4JwUJeKpQABACECOQKNBckAIwA5QCIPBQUAAxIeCgYkJRJdE20TAkwTAQsTGxMCExMIGiEfDQghAD8zPzMSOS9dXV0zERIBFzkRMzEwARQGBxYVFAYjIic1FjMyNTQjIzUzMjY1NCYjIgYHJzY2MzIWAnNSRLC4qJh0k3vT53V3Z2NQQ0JwOEU/jF6InQTnUGcXL6KAjzh7RKKRa09EPUQrI1otNncAAQGJBNkDEgYhAAkAE7YJBAoLBIAJAC8azRESATk5MTABNjY3MxUGBgcjAYkwbyDKLK5AbwTyPrBBFUG+NAABALD+FAREBEgAFgA1QBoFCgoIEAATExQIFBgXBhUPFBsNAkZZDRYJFQA/PysAGD8/MxESATk5ETMRMzMRMxEzMTABEDMyNjURMxEjJyMGIyInIxYVESMRMwFW/qufpogaCm/lllgKCqamAX3++r3UAkD7uJOnXFSg/sAGNAABAHH+/ARgBhQADwAnQBIEBQEAAAULAxARCAgFAw8FAQUALzM/MxI5LxESARc5ETMRMzEwASMRIxEjEQYjIiY1EDYzIQRgctVzPlTYy9roAi3+/Aaw+VADMxL6+wEE/gABAJgCTAGJA1oACwAXQAoGAAANDAMJT1kDAC8rERIBOREzMTATNDYzMhYVFAYjIiaYPjg6QUI5M0MC00JFRUJBRj8AAAEAJf4UAbQAAAASACRAEBEOCwAADgUDExQOEREIAxAAL8wyOS8zERIBFzkRMxEzMTABFAYjIic1FjMyNjU0Jic3MwcWAbSZljMtLTtPUU9tWG43tP7fYWoJaggoNis1EbJzJwABAEwCSgHhBbYACgAgQA4CAAMDCgwLCQkDIAYAHgA/Mj85LxESATk5ETMzMTABMxEjETQ3BgYHJwFSj4UGFjaHQwW2/JQCQ1taFi1fYAACAEIDFAK+BccACwAXACVAEgwGEgAGABgZDwADEAMCAxUJHwA/M8RdMhESATk5ETMRMzEwARQGIyImNTQ2MzIWBRQWMzI2NTQmIyIGAr6rlpKpqJeYpf3+W2hpXFxpZ1wEb6S3uqGjtbaienp6ent2dgACAFAAdQOoA74ABgANACNAEQsJBAIAAwcCCgkGDg8MBQgBAC8zLzMREgEXOREzETMxMAEBJwEBNwEFAScBATcBA6j+qHUBH/7hdQFY/nX+qHUBH/7hdQFYAgz+aUcBXwFeRf5pG/5pRwFfAV5F/mn//wBLAAAF0QW2ACcCFwKDAAAAJgB7/wABBwI8Ax39twAJswMCEhgAPzU1AP//AC4AAAXbBbYAJwIXAj8AAAAmAHviAAEHAHQDTv23AAeyAhAYAD81AP//ABoAAAYhBckAJgB1+QAAJwIXAt8AAAEHAjwDbf23AAmzAwIrGAA/NTUAAAIAM/53A1QEXgAdACgAQUAiCBQeIwEcDxwjFAQpKgAdAQwDHR0RJiYgT1kmEBELSVkRIwA/KwA
YPysREgA5GC9fXl0REgEXOREzETMRMzEwARUUBgcOAhUUFjMyNjcXBiMiJjU0PgI3NjY1NRMUIyImNTQ2MzIWAk5LYXk9GYR6UJZiO8XGvtgjQFk2ZUG0eTs+QjczRgKsM3qUVGpLTThkcSYwh2C6qkZpWVIvWHRdHwErh0VCQEdA//8AAAAABRAHcwImACQAAAEHAEP/wgFSAAizAhAFJgArNf//AAAAAAUQB3MCJgAkAAABBwB2AIUBUgAIswIYBSYAKzX//wAAAAAFEAdzAiYAJAAAAQcBSwAjAVIACLMCHQUmACs1//8AAAAABRAHLwImACQAAAEHAVIABAFSAAizAhgFJgArNf//AAAAAAUQByUCJgAkAAABBwBqADcBUgAKtAMCJAUmACs1Nf//AAAAAAUQBwYCJgAkAAAABwFQADkAgQAC//4AAAaBBbYADwATAE5ALAoODhEBAAgMARAFBRUFFAkTBhNJWRADSVkKDUlZEAoQCgEGAwUSAQ5JWQESAD8rABg/PxI5OS8vKysrEQAzEQEzERIXOREzMxEzMTAhIREhAyMBIRUhESEVIREhASERIwaB/RL9/uOwAroDyf28Ah394wJE+1QBvnYB0f4vBbaX/imW/eYB0gK1AP//AH3+FATPBcsCJgAmAAAABwB6AgIAAP//AMkAAAP4B3MCJgAoAAABBwBD/7cBUgAIswENBSYAKzX//wDJAAAD+AdzAiYAKAAAAQcAdgA/AVIACLMBFQUmACs1//8AyQAAA/gHcwImACgAAAEHAUv/+wFSAAizARoFJgArNf//AMkAAAP4ByUCJgAoAAABBwBqABIBUgAKtAIBIQUmACs1Nf//ADwAAAJWB3MCJgAsAAABBwBD/rMBUgAIswENBSYAKzX//wBUAAACcwdzAiYALAAAAQcAdv9hAVIACLMBFQUmACs1/////wAAAqEHcwImACwAAAEHAUv+8wFSAAizARoFJgArNf//ADwAAAJvByUCJgAsAAABBwBq/wcBUgAKtAIBIQUmACs1NQACAC8AAAVIBbYADAAXAFdAMhEVFQgEDQAAEwQGBBgZFAYHBklZEQ8HPwevB88H3wcFCwMHBwQJCRBKWQkDBBVKWQQSAD8rABg/KxESADkYL19eXTMrEQAzERIBFzkRMxEzMxEzMTABEAAhIREjNTMRISAAAxAhIxEhFSERMyAFSP53/o/+e5qaAbIBUQF8tf3H5wF7/oW+AmIC6f6W/oECiZYCl/6J/qQCQP38lv4K//8AyQAABT8HLwImADEAAAEHAVIAkwFSAAizARoFJgArNf//AH3/7AW+B3MCJgAyAAABBwBDAHkBUgAIswIZBSYAKzX//wB9/+wFvgdzAiYAMgAAAQcAdgEKAVIACLMCIQUmACs1//8Aff/sBb4HcwImADIAAAEHAUsAtAFSAAizAiYFJgArNf//AH3/7AW+By8CJgAyAAABBwFSAJoBUgAIswIhBSYAKzX//wB9/+wFvgclAiYAMgAAAQcAagDVAVIACrQDAi0FJgArNTUAAQCFARAEDASYAAsAGUAJBwkDAQkBDA0IABkvERIBOTkRMxEzMTABFwEBBwEBJwEBNwEDrGD+oAFeYP6e/qRlAV7+oGQBYQSYY/6e/qBjAV/+oWMBYAFgZf6dAAADAH3/wwW+BfYAEwAbACMATkAsFh8XHgQcFBwKFAAAEg8FCAoGJCUWHiEZDSFJWQ8SCAUEAxANBAMZSVkGAxMAP8YrABg/xhIXOSsREgA5ORESARc5ETMRMxESFzkxMAEQACEiJwcnNyYREAAhMhc3FwcWAxAnARYzMhIBEBcBJiMiAgW+/p3+xOuUZXhssgFgAUTRnWF4asC0bv1gc7Dz+PwnZQKdaqjz/QLd/qH+bmSNT5rGAW0BZQGJXodQlMr+lQEQmvxMUgEyASr++poDr0n+zQD//wC6/+wFGQdzAiYAOAAAAQcAQwBGAVIACLMBEwUmACs1//8Auv/sBRkHcwImADgAAAEHAHYAzwFSAAizARsFJgArNf//ALr/7AUZB3MCJgA4AAABBwFLAH0BUgAIswEgBSYAKzX//wC6/+wFGQclAiYAOAAAAQcAagCYAVIACrQCAScFJgArNTX//wAAAAAEewdzAiYAPAAAAQcAdgAxAVIACLMBEgUmACs1AAIAyQAABHkFtgAMABUANkAcDQkFBQYRAAYAFhcNBEpZCRVKWQ0JDQkGBwMGEgA/PxI5OS8vKysREgE5OREzETMRMzMxMAEUBCEjESMRMxEzIAQBMzI2NTQmIyMEef7R/uG4qqrXARkBFvz6qOLKvsrMAxDj7v7BBbb/AM/96o+klYoAAAEAsP/sBJwGHwAwAEFAIikqBR0jABcMDAAdESoFMTISEiouLiZGWS4AKhUPFUZZDxYAPysAGD8/KxESADkYLxESARc5ETMRMxEzETMxMAEUBwYGFRQWFhcWFhUUBiMiJzUWFjMyNTQmJyYmNTQ2NzY2NTQmIyAVESMRNDYzMhYEGY9YOBtHToxmwrO8az+cSNdTbn9gRUdLQIh//uym3N7O4QTyh3NGQyEgKjkzX51loKtFmicvtktrRlJ7VD9qNTlaNVBV3/tMBLKyu53//wBe/+wDzQYhAiYARAAAAQYAQ44AAAizAiYRJgArNf//AF7/7APNBiECJgBEAAABBgB2KwAACLMCLhEmACs1//8AXv/sA80GIQImAEQAAAEGAUvYAAAIswIzESYAKzX//wBe/+wDzQXdAiYARAAAAQYBUr0AAAizAi4RJgArNf//AF7/7APNBdMCJgBEAAABBgBq4gAACrQDAjoRJgArNTX//wBe/+wDzQaFAiYARAAAAQYBUPcAAAq0AwIoESYAKzU1AAMAXv/sBnMEXAApADQAOwBhQDMqACQRMDgZGQQwORgYHzALAAU8PRstJy1GWRkxBDFHWTgkJxEEBA4iJxY1CA4IRlkUDhAAPzMrEQAzGD8zEjkvORI5MysRADMrEQAzERIBFzkRMxEzMxEzEjk5ETMxMBM0Njc3NTQmIyIHJzY2MzIWFzY2MzISFRUhEiEyNjcVBgYjICcGBiMiJjcUFjMyNjU1BwYGASIGByE0Jl74/rh0d5CjNErHYoKlKTWrbsDo/UMIATpbnVRWlWX+331RxYajua5rWJGonrqkA715iwsCB4ABL6GzCAZEgXtUfyk1V19YYP713mv+dSMnlCYh6X9qqpdfWamaYwcIbQIypp6cqAD//wBz/hQDiwRcAiYARgAAAAcAegFGAAD//wBz/+wEEgYhAiYASAAAAQYAQ7UAAAizAhwRJgArNf//AHP/7AQSBiECJgBIAAABBgB2TgAACLMCJBEmACs1//8Ac//sBBIGIQImAEgAAAEGAUv3AAAIswIpESYAKzX//wBz/+wEEgXTAiYASAAAAQYAagoAAAq0AwIwESYAKzU1////2gAAAWMGIQImAPMAAAEHAEP+UQAAAAizAQURJgArNf//AKkAAAIyBiECJgDzAAABBwB2/yAAAA
AIswENESYAKzX///+zAAACVQYhAiYA8wAAAQcBS/6nAAAACLMBEhEmACs1////7AAAAh8F0wImAPMAAAEHAGr+twAAAAq0AgEZESYAKzU1AAIAcf/sBGIGIQAbACYASkArIQYMHBwAABgZFg4RExAGCScoCR9GWQsDFhEZDg8FFAkJAxcUAQMkRlkDFgA/KwAYPzMSOS8SFzkSOSsREgEXOREzETMRMzEwARAAIyIANTQAMzIXNyYnBSc3Jic3Fhc3FwcWEgM0JiMgERQWMzI2BGL++/fe/ukBB9ziZAg5zf7xSelcXkWcZu5Mz5ilqLSc/q+voq+hAjP+5/7SAQ3i5gEGeQTWv5tshT4xdUlLimt3j/5y/uiTqv6Yp7fJAP//ALAAAAREBd0CJgBRAAABBgFSDgAACLMBHhEmACs1//8Ac//sBGIGIQImAFIAAAEGAEPUAAAIswIaESYAKzX//wBz/+wEYgYhAiYAUgAAAQYAdlYAAAizAiIRJgArNf//AHP/7ARiBiECJgBSAAABBgFLDgAACLMCJxEmACs1//8Ac//sBGIF3QImAFIAAAEGAVLxAAAIswIiESYAKzX//wBz/+wEYgXTAiYAUgAAAQYAahsAAAq0AwIuESYAKzU1AAMAaAD8BCkEqAADAA8AGwAzQBgWCgoQBAIEAQMcHRkTEwEHDQ0BAQBQWQEALysRADMYLzMRMy8zERIBFzkRMzMRMzEwEzUhFQE0NjMyFhUUBiMiJhE0NjMyFhUUBiMiJmgDwf2uOzY0OjszND07NjQ6OzM0PQKNior+6Dw9Pzo5QD8C9Dw9Pzo5QD8AAwBz/7wEYgSHABMAGwAjAEtAKRcfHBQUChwAABIPBQgKBiQlFh4hGQ0ZRlkPEggFBAMQDRADIUZZBgMWAD/GKwAYP8YSFzkrERIAOTkREgEXOREzETMREjk5MTABEAAjIicHJzcmERAAMzIXNxcHFgUUFwEmIyIGBTQnARYzMjYEYv7y7ppwVHJegQEM7pp0VHVhf/y9NQHRS3KjpgKXM/4vR3GjqQIl/vT+00V1ToOYAQABDAErTHdMhZj5q2YChjXW1KRk/X0z2wD//wCk/+wEOQYhAiYAWAAAAQYAQ8QAAAizARYRJgArNf//AKT/7AQ5BiECJgBYAAABBgB2cQAACLMBHhEmACs1//8ApP/sBDkGIQImAFgAAAEGAUsSAAAIswEjESYAKzX//wCk/+wEOQXTAiYAWAAAAQYAaiEAAAq0AgEqESYAKzU1//8AAv4UBAYGIQImAFwAAAEGAHYSAAAIswEfESYAKzUAAgCw/hQEdQYUABYAIgA+QB8gBhsUEBARBhEkIxIAERsMFgkDCR5GWQkWAxdGWQMQAD8rABg/KxESADk5GD8/ERIBOTkRMxEzMxEzMTABNjYzMhIREAIjIicjFxYVESMRMxEUByUiBgcVFBYzIBE0JgFYQqpq1/Dx1t56DAQIpqYGAUiomAKaqgEvlAO0WU/+1P71/vT+06EiTT/+NQgA/i40Whu4ySnnxwGw19H//wAC/hQEBgXTAiYAXAAAAQYAarUAAAq0AgErESYAKzU1//8AAAAABRAGtAImACQAAAEHAU0APwFSAAizAhIFJgArNf//AF7/7APNBWICJgBEAAABBgFN9QAACLMCKBEmACs1//8AAAAABRAHNwImACQAAAEHAU4AKwFSAAizAg8FJgArNf//AF7/7APNBeUCJgBEAAABBgFO5AAACLMCJREmACs1//8AAP5CBREFvAImACQAAAAHAVEDoAAA//8AXv5CBAAEWgImAEQAAAAHAVECjwAA//8Aff/sBM8HcwImACYAAAEHAHYBCAFSAAizASAFJgArNf//AHP/7AOLBiECJgBGAAABBgB2RAAACLMBIBEmACs1//8Aff/sBM8HcwImACYAAAEHAUsArAFSAAizASUFJgArNf//AHP/7AOLBiECJgBGAAABBgFL1AAACLMBJREmACs1//8Aff/sBM8HMQImACYAAAEHAU8CGwFSAAizASAFJgArNf//AHP/7AOLBd8CJgBGAAABBwFPAVAAAAAIswEgESYAKzX//wB9/+wEzwdzAiYAJgAAAQcBTADBAVIACLMBIgUmACs1//8Ac//sA6EGIQImAEYAAAEGAUzzAAAIswEiESYAKzX//wDJAAAFWAdzAiYAJwAAAQcBTABYAVIACLMCHQUmACs1//8Ac//sBYEGFAImAEcAAAEHAjgDDAAAAAeyAiMAAD81AP//AC8AAAVIBbYCBgCSAAAAAgBz/+wE0wYUABoAJwBkQDclBhIOAB4eFRkWGRAGBCgpGhUYEBEQR1kVDxEfES8RAwkDEREJEwABDAMJCSJGWQkQAxtGWQMWAD8rABg/KxESADk5GD8SOS9fXl0zKxEAMxg/ERIBFzkRMzMRMzMzETMxMCUjBiMiAhEQEjMyFzMmNTUhNSE1MxUzFSMRIyUyNjU1NCYjIgYVFBYDmglz5dfv8Nbfdw0L/kABwKacnIf+nqqZm6qSm5qTpwEmAQ8BDwEsolNJhYG4uIH7JXe5ziPpx+PP0tb//wDJAAAD+Aa0AiYAKAAAAQcBTQASAVIACLMBDwUmACs1//8Ac//sBBIFYgImAEgAAAEGAU0KAAAIswIeESYAKzX//wDJAAAD+Ac3AiYAKAAAAQcBTgAQAVIACLMBDAUmACs1//8Ac//sBBIF5QImAEgAAAEGAU77AAAIswIbESYAKzX//wDJAAAD+AcUAiYAKAAAAQcBTwFvATUACLMBFQUmACs1//8Ac//sBBIF3wImAEgAAAEHAU8BVAAAAAizAiQRJgArNf//AMn+QgP4BbYCJgAoAAAABwFRAnMAAP//AHP+YQQSBFwCJgBIAAAABwFRAmYAH///AMkAAAP4B3MCJgAoAAABBwFMABABUgAIswEXBSYAKzX//wBz/+wEEgYhAiYASAAAAQYBTPsAAAizAiYRJgArNf//AH3/7AU9B3MCJgAqAAABBwFLAOkBUgAIswEqBSYAKzX//wAn/hQEMQYhAiYASgAAAQYBS8oAAAizA1ARJgArNf//AH3/7AU9BzcCJgAqAAABBwFOAQABUgAIswEcBSYAKzX//wAn/hQEMQXlAiYASgAAAQYBTs4AAAizA0IRJgArNf//AH3/7AU9BzECJgAqAAABBwFPAmQBUgAIswElBSYAKzX//wAn/hQEMQXfAiYASgAAAQcBTwEfAAAACLMDSxEmACs1//8Aff47BT0FywImACoAAAAHAjkBJwAA//8AJ/4UBDEGIQImAEoAAAEGAjpEAAAIswNGESYAKzX//wDJAAAFHwdzAiYAKwAAAQcBSwCWAVIACLMBGgUmACs1//8AsAAABEQHqgImAEsAAAEHAUsAHwGJAAizASUCJgArNQACAAAAAAXnBbYAEwAXAFRALBcDDw8AEBQEDAwHCwgLEBIEGBkXDklZFgoSExJKWQcDExcTFxMBDBASBQEDAD8zPzMSOTkvLxEzMysRADMzKxESARc5E
TMzETMzETMzETMzMTATNTMVITUzFTMVIxEjESERIxEjNQE1IRXJqgMCqsjIqvz+qskEdfz+BL74+Pj4jfvPArD9UAQxjf6K6ekAAQAUAAAERAYUAB4AWUAyFhQQCAgNCQAeHhIJCwQfIBcWGgRGWRMLDAtHWRAMDwwfDC8MAxYaDAwaFgMJDgAACRUAPzM/Ehc5Ly8vXREzKxEAMysRADMREgEXOREzETMzETMzMzEwIRE0JiMiBhURIxEjNTM1MxUhFSEVFAczNjYzMhYVEQOeeoKunqacnKYBwf4/CAoxtXTJyQKehoS61f3nBNt/urp/xFQ4T1u/0v1c////4gAAAsoHLwImACwAAAEHAVL+2gFSAAizARUFJgArNf///5AAAAJ4Bd0CJgDzAAABBwFS/ogAAAAIswENESYAKzX//wAqAAACgga0AiYALAAAAQcBTf79AVIACLMBDwUmACs1////2gAAAjIFYgImAPMAAAEHAU3+rQAAAAizAQcRJgArNf//AB4AAAKKBzcCJgAsAAABBwFO/vkBUgAIswEMBSYAKzX////MAAACOAXlAiYA8wAAAQcBTv6nAAAACLMBBBEmACs1//8AVP5CAlYFtgImACwAAAAGAVFoAP//ADX+QgGBBd8CJgBMAAAABgFREAD//wBUAAACVgcxAiYALAAAAQcBTwBQAVIACLMBFQUmACs1AAEAsAAAAVYESAADABZACQABAQUEAg8BFQA/PxESATkRMzEwISMRMwFWpqYESP//AFT+fwQQBbYAJgAsAAAABwAtAqgAAP//AKL+FANsBd8AJgBMAAAABwBNAgYAAP///2D+fwJlB3MCJgAtAAABBwFL/rcBUgAIswEcBSYAKzX///+R/hQCTwYhAiYCNwAAAQcBS/6hAAAACLMBGxEmACs1//8Ayf47BOkFtgImAC4AAAAHAjkAiQAA//8AsP47BB0GFAImAE4AAAAGAjkrAAABALAAAAQbBEYADQAvQBkNCwcHCAMBAgUIBQ4PAg0FBgQIAAkPBAgVAD8zPzMSFzkREgEXOREzETMzMTABMwEBIwEHESMRMxEUBwMvz/5iAbvJ/peHsrIMBEb+Hv2cAfhx/nkERv7lpnH//wDJAAAD+AdzAiYALwAAAQcAdv9jAVIACLMBDwUmACs1//8AowAAAiwHrAImAE8AAAEHAHb/GgGLAAizAQ0CJgArNf//AMn+OwP4BbYCJgAvAAAABgI5MQD//wBZ/jsBVwYUAiYATwAAAAcCOf7oAAD//wDJAAAD+AW3AiYALwAAAQcCOAEd/6MAB7IBCQMAPzUA//8AsAAAAqAGFAImAE8AAAEGAjgrAAAHsgEHAAA/NQD//wDJAAAD+AW2AiYALwAAAAcBTwIE/Wf//wCwAAACqAYUACYATwAAAAcBTwFC/TgAAQAdAAAD+AW2AA0APUAhBwsLBAAMCQADBA8OCQcECgMBBggCCAIIAAUDAAtJWQASAD8rABg/Ejk5Ly8SFzkREgEXOREzMxEzMTAzEQcnNxEzESUXBREhFclpQ6yqASlD/pQChQH8O3JlAx79Rq550/48mgAB//wAAAInBhQACwA3QBwABAQJBQUMAg0IDAACCQMIBgYBBwEHAQUKAAUVAD8/Ejk5Ly8SFzkRATMRMxI5ETMzETMxMAE3FwcRIxEHJzcRMwFWiUjRpm5GtKYDYF5wjf0/AlRIcXcDIAD//wDJAAAFPwdzAiYAMQAAAQcAdgECAVIACLMBGgUmACs1//8AsAAABEQGIQImAFEAAAEGAHZ5AAAIswEeESYAKzX//wDJ/jsFPwW2AiYAMQAAAAcCOQDNAAD//wCw/jsERARcAiYAUQAAAAYCOVYA//8AyQAABT8HcwImADEAAAEHAUwApgFSAAizARwFJgArNf//ALAAAAREBiECJgBRAAABBgFMHwAACLMBIBEmACs1//8AAQAABMsFtgAnAFEAhwAAAQYCB+gAAAeyARwDAD81AAABAMn+fwU/BbYAGQA4QBwQDQ0OCBQUFxcCDgMaGxIKDhUPAw4SAAVJWQAiAD8rABg/PzMSOTkREgEXOREzETMRMxEzMTABIic1FjMyNjUBIxIVESMRMwEzJjURMxEUBgPJYjZHU2lq/MAIEJ3AAx0IDp/B/n8bkRR6bwTL/vie/NsFtvtOleADPfpYw8wAAQCw/hQERARcAB0AOEAeEw8PEAcbGwIQAx4fFwtGWRcQExARDxAVAAVGWQAbAD8rABg/PxI5PysREgEXOREzETMRMzEwASInNRYzMjURNCYjIgYVESMRMxczNjYzMhYVERQGAyVWNzw+jHqCrKCmhxsKNLRuy8eM/hQZhxSsA3mGhLrW/cEESJZSWL/S/I2aqv//AH3/7AW+BrQCJgAyAAABBwFNAMcBUgAIswIbBSYAKzX//wBz/+wEYgViAiYAUgAAAQYBTRIAAAizAhwRJgArNf//AH3/7AW+BzcCJgAyAAABBwFOAMEBUgAIswIYBSYAKzX//wBz/+wEYgXlAiYAUgAAAQYBTg4AAAizAhkRJgArNf//AH3/7AW+B3MCJgAyAAABBwFTARQBUgAKtAMCKwUmACs1Nf//AHP/7ARiBiECJgBSAAABBgFTWgAACrQDAiwRJgArNTUAAgB9/+wG5wXNABQAHwBTQC4YBg8TEx0ADREdBgUgIQ8SSVkPDwALCw5JWQsDCRVJWQkEAxtJWQMSABNJWQASAD8rABg/KwAYPysAGD8rERIAORgvKxESARc5ETMRMxEzMTAhIQYjIAAREAAhMhchFSERIRUhESEBIgAREAAzMjcRJgbn/QBmXP65/p8BXAFAZloDDv2zAif92QJN/ET5/v8BAfdwV1cUAYkBagFoAYYXl/4plv3mBJ3+z/7Z/tf+zSEEdR4AAwBx/+wHHwRaAB4AKgAxAFVALR8IDgIWFiUvFRUcJQgEMjMrKAsoRlkuFkZZAgUOCy4uBRELEBgiBSJGWQAFFgA/MysRADMYPzMSOS8SORI5KysRADMREgEXOREzETMSOTkRMzEwBSAnBgYjIgAREAAzMhYXNjYzMhIVFSESITI2NxUGBgEUFjMyNjU0JiMiBiUiBgchNCYFlv7bfT7Rid/+9AEG64PNPjrAfsnu/ScIAUpeoVdYmPshmKejmZulppUER3+RDAIghBTrdHcBMQEIAQkBLHdycHn+9+Jp/ncjJ5QnIAI509vV0d3V2Niknp6k//8AyQAABM8HcwImADUAAAEHAHYAeQFSAAizAh8FJgArNf//ALAAAAMnBiECJgBVAAABBgB23AAACLMBGhEmACs1//8Ayf47BM8FtgImADUAAAAGAjl9AP//AGD+OwMnBFwCJgBVAAAABwI5/u8AAP//AMkAAATPB3MCJgA1AAABBwFMABsBUgAIswIhBSYAKzX//wCCAAADJwYhAiYAVQAAAQcBTP92AAAACLMBHBEmACs1//8Aav/sBAIHcwImADYAAAEHAHYAUAFSAAizAS4FJgArNf//AGr/7ANzBiEC
JgBWAAABBgB26gAACLMBLhEmACs1//8Aav/sBAIHcwImADYAAAEHAUv/6gFSAAizATMFJgArNf//AGr/7ANzBiECJgBWAAABBgFLlwAACLMBMxEmACs1//8Aav4UBAIFywImADYAAAAHAHoBJwAA//8Aav4UA3MEXAImAFYAAAAHAHoA1QAA//8Aav/sBAIHcwImADYAAAEHAUz/5AFSAAizATAFJgArNf//AGr/7ANzBiECJgBWAAABBgFMmQAACLMBMBEmACs1//8AEv47BFoFtgImADcAAAAGAjkZAP//AB/+OwKoBUYCJgBXAAAABgI5ggD//wASAAAEWgdzAiYANwAAAQcBTP/cAVIACLMBEwUmACs1//8AH//sAtcGFAImAFcAAAEGAjhiAAAHsgEaAAA/NQAAAQASAAAEWgW2AA8AP0AhBwsLAAwECQwOAgUQEQoODw5KWQcPDwMMEgYCAwJJWQMDAD8rEQAzGD8SOS8zKxEAMxESARc5ETMzETMxMAERITUhFSERIRUhESMRITUB4f4xBEj+MQE2/sqq/scDLwHwl5f+EI39XgKijQABAB//7AKoBUYAHABMQCkXExsbDAgCFRkICg4GHR4OFhMWR1kaCgsKR1kXCwsGEUATDwYARlkGFgA/KwAYPxrNEjkvMysRADMrEQAzERIBFzkRMzMRMzMxMCUyNxUGBiMgETUjNTMRIzU3NzMVIRUhESEVIRUUAhdVPCBqKv7IjY2dnUZgAT7+wgEt/tN1FH8OEAFc/oEBAFBF6v6B/wCB9N0A//8Auv/sBRkHLwImADgAAAEHAVIAbwFSAAizARsFJgArNf//AKT/7AQ5Bd0CJgBYAAABBgFS9wAACLMBHhEmACs1//8Auv/sBRkGtAImADgAAAEHAU0AkQFSAAizARUFJgArNf//AKT/7AQ5BWICJgBYAAABBgFNGQAACLMBGBEmACs1//8Auv/sBRkHNwImADgAAAEHAU4AiwFSAAizARIFJgArNf//AKT/7AQ5BeUCJgBYAAABBgFOEgAACLMBFREmACs1//8Auv/sBRkH1wImADgAAAEHAVAAnAFSAAq0AgEVBSYAKzU1//8ApP/sBDkGhQImAFgAAAEGAVAjAAAKtAIBGBEmACs1Nf//ALr/7AUZB3MCJgA4AAABBwFTAOEBUgAKtAIBJQUmACs1Nf//AKT/7AQ5BiECJgBYAAABBgFTaAAACrQCASgRJgArNTX//wC6/kIFGQW2AiYAOAAAAAcBUQIhAAD//wCk/kIEZQRIAiYAWAAAAAcBUQL0AAD//wAbAAAHTAdzAiYAOgAAAQcBSwFUAVIACLMBKAUmACs1//8AFwAABiMGIQImAFoAAAEHAUsAwQAAAAizASsRJgArNf//AAAAAAR7B3MCJgA8AAABBwFL/+ABUgAIswEXBSYAKzX//wAC/hQEBgYhAiYAXAAAAQYBS60AAAizASQRJgArNf//AAAAAAR7ByUCJgA8AAABBwBq//EBUgAKtAIBHgUmACs1Nf//AFIAAAQ/B3MCJgA9AAABBwB2AEIBUgAIswETBSYAKzX//wBSAAADbQYhAiYAXQAAAQYAdugAAAizARMRJgArNf//AFIAAAQ/BzECJgA9AAABBwFPAUQBUgAIswETBSYAKzX//wBSAAADbQXfAiYAXQAAAQcBTwDfAAAACLMBExEmACs1//8AUgAABD8HcwImAD0AAAEHAUz/7QFSAAizARUFJgArNf//AFIAAANtBiECJgBdAAABBgFMhgAACLMBFREmACs1AAEAsAAAAtsGHwAMAB1ADgABAQ0GDgQJRlkEAAEVAD8/KxEBMxI5ETMxMCEjERAhMhcHJiMiBhUBVqYBZ2BkK1dJYVkEnAGDJYUee3oAAAEAw/4UBBcFywAgAERAJBoeHgwIEhwICgIFISIdCgwKRlkaDAwQABAWRlkQBAAFRlkAGwA/KwAYPysREgA5GC8zKxEAMxESARc5ETMzETMxMAEiJzUWMzI2NREjNTc1NDYzMhcHByYjIgYVFSEVIREUBgFIRUBGPV9N3t6itlV4FhVmPGJQARr+6p7+FBOLEmZxA81LPIvDsitAQSBpfJWB/De4rwAEAAAAAAUUB6oAEAAYACIALgBhQDQRBQQYBhQHBAMHCCMAKQsICwkiFAIAHQMJMC8mDiwCCRgGSVkJFA4YIg4YGA4iAwgcBAgSAD8zLxIXOS8vLxESOTkrEQAzMxEzERIBFzkRMxEzETMRMxESOTkROTkxMAEUBwEjAyEDIwEmNTQ2MzIWEwMmJwYGBwMTNjY3MxUGBgcjEzQmIyIGFRQWMzI2A2hoAhSusP2epq4CFGp6Y2R9G7IZLw4wCbGYMWYXyyCoQm/TQjMzQjw5NUAFloU4+ycBkf5vBNc0iGVydfw2AbA6kTCHGP5UBIU7lSoQLqEt/vU5PDw5Nz09AAUAXv/sA80HqgAJACQALwA7AEcAZ0A3LRJCNjwwKRUVCyQkBjAANh0SB0hJCQkEPzlFMxELDBUpR1kMFRUPICAZRlkgEA8lRlkPFgoVBAAvPz8rABg/KxESADkYLzkrEQAzGD8zxDIROS8REgEXOREzMxEzETMRMxEzMTABNTY2NyEVBgYHAScjBgYjIiY1ECU3NTQmIyIGByc2NjMyFhURJTI2NTUHBgYVFBYBFAYjIiY1NDYzMhYHNCYjIgYVFBYzMjYB1y5qFgEEFaSAAQIhCFKjeqO5Ahm0d4Vgp0c3VNBl0cn+DpuxpsavbQGqe2ZleXllZXxtQTMzQjw5NEAG2RAqeB8MGGlE+SecZ0momwFMEAZEgno0IH8rM67A/RR1qpljBwdtc1peBT1id3RjYnN3Xjg9PTg4PT0A/////gAABoEHcwImAIgAAAEHAHYCTAFSAAizAh0FJgArNf//AF7/7AZzBiECJgCoAAABBwB2AYUAAAAIswNFESYAKzX//wB9/8MFvgdzAiYAmgAAAQcAdgEZAVIACLMDLQUmACs1//8Ac/+8BGIGIQImALoAAAEGAHZWAAAIswMtESYAKzX//wBq/jsEAgXLAiYANgAAAAYCOQYA//8Aav47A3MEXAImAFYAAAAGAjm5AAABAQwE2QOuBiEADgAYQAkHABAPCwSADgkALzMazTIREgE5OTEwATY2NzMWFhcVIyYnBgcjAQx/ZhemFm19d1iFiFNzBPCIgCkqhYIXN4OGNAAAAQEMBNkDrgYhAA4AGEAJBgAQDwUBgAMLAC8zGs0yERIBOTkxMAEzFhc2NzMVBwYHIyYmJwEMc3Jpglt3QpAuphdmfwYhSnOCOxlElFcpfogAAAEBLQTZA4UFYgADABG1AAEEBQADAC8zERIBOTkxMAEhFSEBLQJY/agFYokAAQElBNkDkQXlAA4AGEAJDAMQDwsEgAgAAC8yGswyERIBOTkxMAEiJiczHgIzMjY3MwYGAlaMnAloBilJVWVgCmgKpwTZiYMxOBpAQ36OAAABAKIFAgFmBd8ACwATtgYAAAwNAwkAL80REgE5ETM
xMBM0NjMyFhUUBiMiJqI4Kig6OigqOAVxOTU2ODg3NwAAAgFvBNkDLQaFAAsAFwAeQAwSBgwABgAYGQ8JFQMALzPMMhESATk5ETMRMzEwARQGIyImNTQ2MzIWBzQmIyIGFRQWMzI2Ay17ZmV4eWRlfGxCMzNCPDk0QQWyYnd1YmJzd144PT04OD09AAEAJf5CAXEAAAAPABhACgAJBA0JAxARAgcALzMREgEXOREzMTAXFDMyNxUGIyI1NDY3MwYGsl4qN0E8z1ZIeERF7l4NbRK8Roc1Qm0AAAEBCATZA/AF3QAXACRADwkVGBkRAAUMAAwADBWACQAvGsw5OS8vETMRMxESATk5MTABIi4CIyIGByM2NjMyHgIzMjY3MwYGAxQrUk9JIjIzDmINc1suVk5IIDEwD2MNcQTbJS0lPD15iSUtJTs+eYkAAAIA5wTZA7YGIQAJABMAG0AMDgUTCQQUFQ0EgBMJAC8zGs0yERIBFzkxMBM2NjczFQYGByMlNjY3MxUGBgcj5yRuH7olqzphAWUxZRq6Jas6YATyMLpFFT/EMBlEsToVP8QwAAABAfwE2QMQBnMACQATtgQACwoEgAkALxrNERIBOTkxMAE2NjczFQYGByMB/Bs1DLgSbTFkBPZI41IXSu1MAAMBGwUOA4MGtAAIABQAIAArQBQPCRUbGwMICQQhIhgMCAwIDAMeEgAvM8w5OS8vETMREgEXOREzETMxMAE2NzMVBgYHIyc0NjMyFhUUBiMiJiU0NjMyFhUUBiMiJgIAQR+9IXkzUOU0JikxNyMmNAG0NCYpMTcjJjQFhamGFEOzPQQ0LjQuMjExMjQuNC4yMTH//wAAAAAFEAYKAiYAJAAAAQcBVP4g/5cAB7ICEgAAPzUA//8AmAJMAYkDWgIGAHkAAP///9QAAAR1BgoAJgAofQABBwFU/dj/lwAHsgEQAAA/NQD////UAAAFtQYKACcAKwCWAAABBwFU/dj/lwAHsgEQAAA/NQD////kAAADRAYKACcALADuAAABBwFU/ej/lwAHsgEQAAA/NQD////k/+wGAgYKACYAMkQAAQcBVP3o/5cAB7ICHAAAPzUA////1AAABYUGCgAnADwBCgAAAQcBVP3Y/5cAB7IBDQAAPzUA////5AAABjMGCgAmAXY/AAEHAVT96P+XAAeyASMAAD81AP///+n/7AKTBrQCJgGGAAABBwFV/s4AAAAMtQMCAS4RJgArNTU1//8AAAAABRAFvAIGACQAAP//AMkAAAS+BbYCBgAlAAAAAQDJAAAD+AW2AAUAHUAOAwQEAAYHBQJJWQUDBBIAPz8rERIBOTkRMzEwARUhESMRA/j9e6oFtpn64wW2AP//ACcAAARtBbYCBgIoAAD//wDJAAAD+AW2AgYAKAAA//8AUgAABD8FtgIGAD0AAP//AMkAAAUfBbYCBgArAAAAAwB9/+wFvgXNAAMADwAbAD9AIAIDEBYQChYECgQcHQADSVkAAAcNDRlJWQ0EBxNJWQcTAD8rABg/KxESADkYLysREgE5OREzETMREjk5MTABIRUhJRAAISAAERAAISAAARASMzISERACIyICAeMCdf2LA9v+nf7E/r3+oQFgAUQBOwFi+3P69PP49/L1+wMzlT/+of5uAYsBaAFlAYn+cP6g/tj+zAEwASwBKgEu/s4A//8AVAAAAlYFtgIGACwAAP//AMkAAATpBbYCBgAuAAAAAQAAAAAE0wW2AAoAGkALCAAMCwQICQMBCBIAPzM/EjkREgE5OTEwISMBJicGBwEjATME07b+tlcWIUf+uLYCELEDoPxai8n8XgW2//8AyQAABnEFtgIGADAAAP//AMkAAAU/BbYCBgAxAAAAAwBIAAAEJQW2AAMABwALADRAHQoHAwIGCAYNDAADSVkAAAoECgtJWQoSBAdJWQQDAD8rABg/KxESADkYLysREgEXOTEwEyEVIQMhFSEBFSE1wwLn/RlSA4v8dQO0/CMDSJYDBJf7eZiY//8Aff/sBb4FzQIGADIAAAABAMkAAAUMBbYABwAjQBEBAAQFAAUJCAYDSVkGAwEFEgA/Mz8rERIBOTkRMxEzMTAhIxEhESMRIQUMqv0RqgRDBR/64QW2AP//AMkAAARoBbYCBgAzAAAAAQBKAAAEXAW2AAwANUAcCAoKAAkCCwYDAgAFDQ4HCAQISVkEAwAKSVkAEgA/KwAYPysRADMREgEXOREzETMRMzEwMzUBATUhFSEnAQEhFUoB4f4rA8v9XGABzP4fA1SNAm8CK4+ZAv3f/ZqYAP//ABIAAARaBbYCBgA3AAD//wAAAAAEewW2AgYAPAAAAAMAav/sBfgFywAZACIAKwBQQCknFBoCDQ0rGQ4eBwcOFAMsLQwQGioQKkpZIiQYJEpZAhgQGBAYDhMABAA/Pzk5Ly8RMysRADMrEQAzETMREgEXOREzETMzMxEzMxEzMTABMxUzMhYWFRQCBCMjFSM1IyIkAjU0NjYzMxMzMjY1NCYrAyIGFRQWMzMC26xGq/uFlf79sCmsLbD+/pKH/KtDrBnJ3865Oqw5ttHeyhgFy7SI+J+m/v2C4eGEAQShnviL/EXbw7nS1LfF2QD//wAIAAAElgW2AgYAOwAAAAEAbQAABfIFtgAdAD5AHwoHEQAADgEVGBgBBwMeHx0DDQNJWRENDQEWDwgDARIAPz8zMxI5LzMrEQAzERIBFzkRMxEzMxEzETMxMCEjESMiJiY1ETMRFBYzMxEzETMyNjURMxEUBgQjIwODqi2w/5Cuz9Qbqh3Tz7CQ/v2vLQG+evekAeP+IbzJA2T8nMa7AeP+H6X3ewAAAQBQAAAF9AXNAB8AOUAgAw0dExgTFhkHCg0ICCAhEABJWRAEGhYGCQgJSVkZCBIAPzMrEQAzMzMYPysREgEXOREzETMxMAEiAhUUEhcVITUhJgI1EAAhIAARFAIHIRUhNTYSNTQCAyHu+q20/bYBbJegAWIBOgE7AWKelwFr/ba3qfkFNf7//eH+s4SFmHYBXssBNgFg/qX+x8/+pniYhYYBTt78AQL//wA8AAACbwclAiYALAAAAQcAav8HAVIACrQCASEFJgArNTX//wAAAAAEewclAiYAPAAAAQcAav/vAVIACrQCAR4FJgArNTX//wBz/+wExwZzAiYBfgAAAQYBVB0AAAizAjQRJgArNf//AFr/7AOHBnMCJgGCAAABBgFUyAAACLMBLxEmACs1//8AsP4UBEQGcwImAYQAAAEGAVQ7AAAIswEeESYAKzX//wCo/+wCkwZzAiYBhgAAAQcBVP7EAAAACLMBGREmACs1//8ApP/sBHEGtAImAZIAAAEGAVU7AAAMtQMCATQRJgArNTU1AAIAc//sBMcEXAALACoAR0AkCQ8nFQQEHSIdDwMrLBgPJygoFgwSEgdGWRIQHwAMAEZZJAwWAD8zKxEAMxg/KxESADk5ETMYPxESARc5ETMRMzMRMzEwJTI2NTU0JiMgERQWFyICERASMzIWFzM2NzMGBhURFDMyNx
UGIyImJyMGBgJQqZaYqf7Rk4XW7vTheaE2DBgpgRUcVB0hLkFRWRINO6d3w9oP5cf+UNTUiwEpAQwBEgEpVFRcOEL2dP5Jcgp3GlFWVlEAAgCw/hQEqAYfABMAKQBMQCgYDw8QJwMeCAgDBSIQBSorEBsjIkZZDiMOIwsACxtGWQsWABRGWQAAAD8rABg/KxESADk5GC8vKwAYPxESARc5ETMRMxEzETMxMAEyFhUQBRUEERQEIyImJxEjETQ2FyIGFREWFjMyNjU0JiMjNTMyNjU0JgKT3Pn+xwF5/vjubaBPpv3knp1doVarrb6xcFybopwGH9C3/tozCCr+kdHhHyb94wY04faMrKX8iTEllp2dpI6TiXuFAAEACv4UBA4ESAASACFAEA8EAQUEExQKCQkBDgUPARsAPz8zEjkvMxESARc5MTABIzQSNwEzExYXMz4CEzMBBgICFLRAK/4/rPBeEwgFKSvqrP5rMDX+FGABJnIEPP2462cejoECbfvTfP7cAAIAcf/sBGAGEgAeACoAO0AgJRwQAx8WFgkAAxwFKywQACIDGQYZKEZZGRYGDUZZBgAAPysAGD8rERIAFzkREgEXOREzETMRMzEwASYmNTQ2MzIWFwcmJiMiBhUUFhcWFhUUACMiJDU0EgE0JicGBhUUFjMyNgIhjHTCpGe9fkhwn1FVYWun0rH+8Ozj/vDiAmF7jc6/spOirgOoTp9jgpgtP4c+LE9CR29bc/Gk6/74+NKxAQX+c4C3SjXZoJCrugAAAQBa/+wDhwRcACUATUArBBAjFx0LARMXEAYmJxQlAiUCRlkPJR8lAgsDJSUNGhohRlkaEA0HRlkNFgA/KwAYPysREgA5GC9fXl0rERIAORESARc5ETMRMzEwARUjIBUUFjMyNjcVBiMiJjU0Njc1JiY1NDYzMhYXByYmIyIVFCECy5T+yZOSVKZkid3S8W6CYmvgwGGlZD9egk/6AT0CgY3DWmInL5RLqZRigykLHH9chZ4hLYUqHKKsAAABAHP+bwOgBhQAIAAwQBgHGR4TEw4OAwAZBCEiESMeAwABAEZZAQAAPysRADMzGD8REgEXOREzETMRMzEwEzUhFQYAAhUUFhYXFhYVFAcjNjU0JicmJjU0PgI3BiGwAvDX/uCKO32slYh/pn1vj8u8O3DJ8ij+8QWHjYG0/r3+36ZidkklH21blaShazg9GiTbwnLQw+XaCAAAAQCw/hQERARcABQAL0AYABQMCAgJFAkWFRAERlkQEAwJCg8JFQAbAD8/PxI5PysREgE5OREzETMRMzEwARE0JiMiBhURIxEzFzM2NjMyFhURA556gqygpocbCDO4ccbI/hQEsYaEutb9wQRIllFZv9L7SQADAHP/7ARKBisACwASABkASUAnFhAQBhcPDwAGABobFhBGWQ8WvxYCCwMWFgMJCRNGWQkBAwxGWQMWAD8rABg/KxESADkYL19eXSsREgE5OREzETMRMxEzMTABEAIjIgIREBIzMhIBMhITIRISEyICAyECAgRK9Prw+fX09Pr+EqScBv15BJanoZYKAoULmAMM/mr+dgGTAY0BlwGI/mv74QExATP+0P7MBSn+4f7nARkBHwABAKj/7AKTBEgADwAfQA4BDgcOERAPDwsERlkLFgA/KwAYPxESATk5ETMxMAERFBYzMjY3FQYGIyImNREBTklXJWUbH2kyoJEESPz6aGUNB38NEaipAwv//wCwAAAEGwRGAgYA+gAAAAH/8v/sBEYGIQAiADNAGwgBFQMkAAAjGBNGWRgWHh8fAAsLBkZZCwEAFQA/PysREgA5ETMYPysRATMREhc5MTAjAScuAiMiBzU2MzIWFhcBFhYzMjcVBiMiJicDJicjBgcDDgHZOh4yQzE6OUQ/W3lYNgFrEyojGyEwPUpTHZxUFgkcWP4EN6JVRiQNhRE8gpj8DDEzCnkYTFMBtPBgdNH9tgD//wCw/hQERARIAgYAdwAAAAEAAAAABAIESAAOABxADAkKCgAQDwUOFQkADwA/Mj85ERIBOTkRMzEwETMTFhYXMzYSETMQAgcjrNsaUxAIsZ+mz+G6BEj9skPuPq8BvQFR/pX+BOEAAQBx/m8DoAYUADEASUAnBBktHx0cEwwMKAAcHyUZBzIzHDABMAFHWTAwECYpJSYlRlkmABAjAD8/KxEAMxESORgvKxESADkREgEXOREzETMRMxEzMTABIyIGFRQeAhcWFhUUBgcjNjY1NCYnJiY1NDY3NSY1NDY3BiMjNSEVIyIGBhUUFjMzA1aysNUyX4dUjoc2Q5w1QnOPyMeegNmLpoBzRAK6M4Lgf6evqgLyso5QYj0kEh1uWkGVY0eTNDc9GSLIsIzSJwxA2XWeMgyNg1CQX3Ns//8Ac//sBGIEXAIGAFIAAAABABn/7AT0BEgAFQA2QB0KCwcTEAMTCw0FFhcSCQ0PDUZZDw8LFQUARlkFFgA/KwAYPz8rEQAzMxESARc5ETMRMzEwJTI3FQYjIjURIREjESM1NyEVIxEUFgR9JjArVNv+I6bdjwRM1TN1EoMY/QLR/EYDukpEjv08SjcAAgCm/hQEYgRcABAAHAA2QBsVCQkKGgAKAB0eBgMODhFGWQ4QChsDF0ZZAxYAPysAGD8/KxESADkREgE5OREzETMRMzEwARAAIyInIxYVESMREBIzMhIlIgYVERYzMjY1NCYEYv8A6bN4CAio++rb/P4hnpd6t5+YkAIl/vH+1l491P7bBB8BCgEf/tGiz9H+rmbQ3tbUAAABAHP+bwOiBFwAIAAuQBcOBwAVFQcbAyIhBBISGAsYHkZZGBALIwA/PysREgA5ETMREgEXOREzETMxMAEUFhYXFhYVFAYHIzY2NTQmJicmJjUQADMyFhcHJiMiBgEfO4+glIM2Q5w2QzNuYczDART4T542NYJysKoCCoeEUCIga1pCmF9GlDIoLyYSJf7bAR4BNiEYjTPaAAIAc//sBLYESAANABkAMEAZFAAOBwcMAAsEGxoMFwkXRlkJDwQRRlkEFgA/KwAYPysRADMREgEXOREzETMxMAEUBgYjIgA1ECEhFSEWARQWMzI2NRAnIyIGBGB75Zrr/vgCUAHz/viy/L+qoZ+rrkHeyAH8nfGCASD+Aj6Op/73wtHFtgEOutAAAAEAEv/nA5MESAATACxAFwMPAAkPEQQUFQIRExFGWRMPDAVGWQwWAD8rABg/KxEAMxESARc5ETMxMAEVIREUMzI2NxUGBiMiJjURITU3A5P+UM0vYhsjbzC1qv7XlARIjv2W3w0HfQ8SqqoCf0pEAAABAKT/7ARxBEgAFQAlQBEMEwYDEwMXFg8EDwAJRlkAFgA/KwAYPzMREgE5OREzETMxMAUiJhERMxEUFjMyNjU0JiczFhYVEAACc+fopp6Zp6EcIqYkHP7+FPoBCgJY/bDAw+77guCIkNaM/sL+1AAAAgBz/hQFTARcABgAIgBBQCMKBCAYGAwAGRMTAAcEBCMkEBxGWRAQBg8gDAEMRlkXARYAGwA/P
zMrEQAzGD8/KxESARc5ETMRMzMRMxEzMTABESQAERA3FwYGFRAFETQ2MzISFRQCBgcRATQmIyIGFRE2NgKD/vz+9M+DWVEBaKaVtNqI+KUBeXxmSU6zxv4UAdoLASMBDwEo/Vp14Hz+dSMCbLu+/tv6sv77kAj+JgQnudt4cv2SEOwAAf/s/hQEUAROACAAOUAhDgcIBRUYHgciFyEFGAgVBAYXGxEMRlkRGwYPABxGWQAPAD8rABg/PysAGD8SFzkRATMSFzkxMBMyFhYXEwEzARMWFjMyNxUGIyImJwMBIwEDJiYjIgc1NrI2Tj4skQE+tP5UvjBSPy0tPDtzjTuW/payAdCsJkYrJRsxBE4rW3D+jwJh/Pz+HHpKCIEPdp8Bg/1oA0QBvGNQC4ERAAEApP4UBYcGEgAaAD1AHxYTAQ4OGQ8ECgoPEwMbHBoABxQPARkQGUZZDRAWDxsAPz8zKxEAMxg/Mz8REgEXOREzETMzETMRMzEwARE2NjU0JiczEhUQAAURIxEkABERMxEUFhcRA1q8yxolpj/+4/7wpP74/vamtLgGEvppD+fMeOuo/vD0/uz+zhD+JgHaCQEiARACH/3bw9oNBZkAAQBz/+wFvARIACcAPUAeCgMmExMQGSAgEAMDKCkmEREAHAYPFg0ADUZZIwAWAD8yKxEAMxg/MxI5LzkREgEXOREzETMSOREzMTAFIgI1NBI3MwYGFRQWMzI2NREzERQWMzI2NTQCJzMWEhUUAiMiJyMGAfS2yzdErEQ5eGteaaFqXWt4N0WsQTnLttxECUEUASj+nAEBmZz/ncHYj30BN/7JgIzYwZcBBJ2S/vmd/P7Wtrb//wAJ/+wCkwXTAiYBhgAAAQcAav7UAAAACrQCASURJgArNTX//wCk/+wEcQXTAiYBkgAAAQYAajkAAAq0AgErESYAKzU1//8Ac//sBGIGcwImAFIAAAEGAVQhAAAIswIiESYAKzX//wCk/+wEcQZzAiYBkgAAAQYBVCcAAAizAR8RJgArNf//AHP/7AW8BnMCJgGWAAABBwFUAMkAAAAIswExESYAKzX//wDJAAAD+AclAiYAKAAAAQcAagAnAVIACrQCASEFJgArNTUAAQAS/+wFQgW2AB0ARkAmFg4ODwgbGxQCDxEFHh8WDUlZFhYPEhUREhFJWRIDDxIABUlZABMAPysAGD8/KxEAMxESORgvKxESARc5ETMRMxEzMTAFIic1FjMyNjU1NCYjIREjESE1IRUhESEyFhUVFAYDz2A2N1tlaIOM/oOq/rADt/5DAYzN3cQUFpYTfHCDgHH9GwUfl5f+Xr+yj77T//8AyQAAA/gHcwImAWEAAAEHAHYAWgFSAAizAQ8FJgArNQABAH3/7ATjBc0AGAA4QB4GAxEWDAURBBkaAwZJWQMDDhQUAElZFAQOCUlZDhMAPysAGD8rERIAORgvKxESARc5ETMzMTABIgQHIRUhEgAzMjcVBiMgABEQACEyFwcmA0Li/vMeAtP9KQoBC/miyaHi/rT+ogF5AU7tskepBTP68Zb+7v7jN5U5AYQBbQFfAZFYlFL//wBq/+wEAgXLAgYANgAA//8AVAAAAlYFtgIGACwAAP//ADwAAAJvByUCJgAsAAABBwBq/wcBUgAKtAIBIQUmACs1Nf///2D+fwFoBbYCBgAtAAAAAgAA/+kHIwW2ABoAIwBHQCYYGxsEHwAABA0DJCUYI0lZGBgLFhYGSVkWAwsQSlkLEgQbSlkEEgA/KwAYPysAGD8rERIAORgvKxESARc5ETMRMxEzMTABFAQhIREhAgIGBiMiJzUWMzI+AhITIREzIAEzMjY1NCYjIwcj/u3+/P65/pM5VFCLa0VAMj8wQSs3REECpnoCOv1Mhca3wNxmAarO3AUf/kj99vt5GY8aPmf6Ab4B4v2Q/U2LjIp8AAIAyQAAB1QFtgARABoASkAmCwcHCA8SEgwEFgAABAgDGxwaBgsGSVkPCwsEDQkDCBIEEkpZBBIAPysAGD8/MxI5LzMrEQAzERIBFzkRMxEzMxEzETMRMzEwARQEISERIREjETMRIREzETMgATMyNjU0JiMjB1T+8P77/rf9faqqAoOseQI5/U6FxLnB22YBqs7cArD9UAW2/ZICbv2Q/U2LjIl9AAABABIAAAVCBbYAEwA6QB8ADAwNBgUFEg0PBBQVEw8QD0lZAAtJWQAADRADBg0SAD8zPxI5LysrEQAzERIBFzkRMxEzETMxMAEhMhYVESMRNCYjIREjESE1IRUhAgwBkM3Zqn2M/n2q/rAD9v4EA328tf30AfZ+cf0bBR+Xl///AMkAAATlB3MCJgG0AAABBwB2AKIBUgAIswEUBSYAKzX//wAb/+wE+AdeAiYBvQAAAQcCNgBEAVIACLMBFwUmACs1AAEAyf6DBQwFtgALADBAGAgFAgMJAAADBQMMDQoGAwUISVkBBRIDIgA/PzMrABg/MxESARc5ETMRMxEzMTAhIREjESERMxEhETMFDP4vsP4+qgLvqv6DAX0FtvrkBRwA//8AAAAABRAFvAIGACQAAAACAMkAAAR9BbYADQAWAD1AIBIACQ4OBAQHAAMYFwkWSVkJCQQFBQhJWQUDBA5KWQQSAD8rABg/KxESADkYLysREgEXOREzETMRMzEwARQEISERIRUhETMyFhYBMzI2NTQmIyMEff79/vv+VANe/UzjwfJ0/Pbvvq2w288BqtrQBbaX/idZrv5UgpWOeAD//wDJAAAEvgW2AgYAJQAA//8AyQAAA/gFtgIGAWEAAAACAA7+gwVKBbYADQATAENAJAQFEwcQCg4MAQAADAoHBQUUFQoQSVkKAwEFIhMMBgMGSVkDEgA/KxEAMzMYPzM/KxESARc5ETMRMxEzETMRMzEwASMRIREjETMSEhMhETMhESEGAgcFSqL8CKJxmtsMApG5/p3+sxLOif6DAX3+gwIXAQMC5gEz+uQEg/L9WeoA//8AyQAAA/gFtgIGACgAAAABAAIAAAa8BbYAEQA8QB8GDQ0DDgoJCAEOABEHEhMPDAkGAwAAAQ4LERIHBAEDAD8zMz8zMxI5ETMzMzMzERIBFzkRMzMRMzEwAQEzAREzEQEzAQEjAREjEQEjAlb9wb4COaQCOr79wAJSxP26pP27xwLwAsb9PALE/TwCxP08/Q4C5f0bAuX9GwABAEr/7AQ1BcsAKABDQCQcABMHBwADFyMMBikqAxgXGBdKWRgYCiYmH0pZJgQKEEpZChMAPysAGD8rERIAORgvKxESADkREgEXOREzETMxMAEUBgcVFhYVFAQhIic1FhYzMjY1NCYjIzUzMjY1NCYjIgYHJzY2MzIWBBm3obe9/s7+6f+jYN9nxsvh39rRzeGiiW6ydVRl+4fh/wRgkLQYCBm0kc3lT54uMpaNhoqPk4RrgDJKcktNxQABAMsAAAVSBbYADwA0QBgOAgIPBgkJCA8IEBEFBAwNBA0JDxIGAAMAPzI/Mzk5ETMRMxESATk5ETMRMxEzETMxMBMzERQHMwEz
ESMRNDcjASPLnw4IAzS6oBEJ/Mu6Bbb80+G2BMT6SgMlyd37NQD//wDLAAAFUgdeAiYBsgAAAQcCNgDhAVIACLMBEAUmACs1AAEAyQAABOUFtgAKAC1AFgcDAwQACQoEBAsMCgcCBwQIBQMBBBIAPzM/MxI5OREzERIBFzkRMxEzMTAhIwERIxEzEQEzAQTlzv1cqqoCk8P9eQLl/RsFtv08AsT9OgABAAD/5wTZBbYAEwAtQBgDEgEAABIKAxQVEgNJWRIDCA1KWQgTARIAPz8rABg/KxESARc5ETMRMzEwISMRIQcCAgYnIic1FjMyNjYSEyEE2ar+JR89XZh+Sjs2OzVPPV04AxIFH/D+If5FrgIZjxpX1wJZAbj//wDJAAAGcQW2AgYAMAAA//8AyQAABR8FtgIGACsAAP//AH3/7AW+Bc0CBgAyAAD//wDJAAAFDAW2AgYBbgAA//8AyQAABGgFtgIGADMAAP//AH3/7ATPBcsCBgAmAAD//wASAAAEWgW2AgYANwAAAAEAG//sBPgFtgAWACpAFRIIAgkEFxgODQgNABEJAwAFSVkAEwA/KwAYPzMSOTkRMxESARc5MTAFIic1FjMyNjcBMwEWFzM2NwEzAQ4CASVvVF1gboVC/ce8AbAZDggcCwFntP4tVIepFB6mK2WLBEH8wTEvVBYDNfvqu6pP//8Aav/sBfgFywIGAXMAAP//AAgAAASWBbYCBgA7AAAAAQDJ/oMFuAW2AAsAMkAZCAUJAAMCAgAFAwwNCgYDAAgFCElZBRIDIgA/PysRADMYPzMREgEXOREzETMRMzEwJTMRIxEhETMRIREzBQysofuyqgLvqpr96QF9Bbb65AUcAAABAKoAAATHBbYAEwAtQBYLCBEBAQAIABQVBQ5JWQUFARIJAwESAD8/MxI5LysREgE5OREzETMRMzEwISMRBgYjIiY1ETMRFBYzMjY3ETMEx6qVxmrP36p/j2GxqaoCXDUnvrMCRf3PeXQdNwLKAAEAyQAAB3kFtgALADFAGAQBCAUJAAAFAQMMDQoGAgMIBAEESVkBEgA/KxEAMxg/MzMREgEXOREzETMRMzEwISERMxEhETMRIREzB3n5UKoCWKoCWKwFtvrkBRz65AUcAAEAyf6DCAQFtgAPADtAHgMABwQICw4NDQsEAAQQEQ4iCQUBAwsHAwADSVkAEgA/KxEAMzMYPzMzPxESARc5ETMRMxEzETMxMDMRMxEhETMRIREzETMRIxHJqgJHrAJIqqyiBbb65AUc+uQFHPrk/ekBfQAAAgASAAAFFwW2AAwAFQA9QCAJDQ0EEQAABAYDFhcJFUlZCQkEBwcGSVkHAwQNSlkEEgA/KwAYPysREgA5GC8rERIBFzkRMxEzETMxMAEUBCMhESE1IREzIAQBMzI2NTQmIyMFF/79+f5H/rAB+vQBBQES/PX8tamvy+ABqs7cBR+X/ZDN/hqLjIh+AAADAMkAAAYKBbYACgATABcAP0AgAwsLAA8HFRQUBwADGBkVEgMTSVkDAwAWAQMAC0pZABIAPysAGD8zEjkvKwAYPxESARc5ETMRMxEzETMxMDMRMxEzIAQVFAQjJTMyNjU0JiMjASMRM8mq7wEFARL+/fn+9ve1qrPI2wSXqqoFtv2Qzc/O3JGNjIl7/VIFtgACAMkAAAS6BbYACgASADJAGQcLCwQOAAQAExQHEklZBwcEBQMEC0pZBBIAPysAGD8SOS8rERIBOTkRMxEzETMxMAEUBCMhETMRISAEASEgETQmIyEEuv7x+/4ZqgEjAQsBGfy5ASsBbLvO/vIBqsvfBbb9kNP+IAEXh38AAQA9/+wEiQXLABoAOkAfGBUVCQkWDwMEGxwXFklZFxcMBQwSSVkMEwUASVkFBAA/KwAYPysREgA5GC8rERIBFzkRMxEzMTABIgcnNjMyBBIVEAAhIic1FhYzIAATITUhJgAB06yiSKzs2QE5ov6U/qrjnFOsYwEPARQI/TECzRb+8QUzTJBUsP663f6I/mw5lRUiASEBEJjlAQIAAgDJ/+wH5wXNABIAHgBHQCYMCAgJEw0GGQAABgkDHyAQHElZEAQMB0lZDAwJCgMJEgMWSVkDEwA/KwAYPz8SOS8rABg/KxESARc5ETMRMzMRMxEzMTABEAAhIAADIREjETMRIRIAISAAARASMzISERACIyICB+f+q/7Q/tP+qwv+nqqqAWQXAVEBHwEzAVb7oO7n6u3r6OnwAt3+nv5xAW8BVf1QBbb9kgE3AU7+b/6h/tj+zAEyASoBKgEu/s8AAgAzAAAETgW2AA0AFQA9QCAVDAwLEgYCBgMLBBcWABRKWQMJAAACCQkPSlkJAwwCEgA/Mz8rERIAORgvEjkrERIBFzkRMxEzETMxMAEBIwEmJjU0JCEhESMRESMiBhUQITMCe/6ByQGaoZIBDwETAZKq47e+AXvdAmL9ngJ/M8+exNP6SgJiAsF+jv7d//8AXv/sA80EWgIGAEQAAAACAHf/7ARUBiEAFwAiADtAHhoSIAsAAAYSAyQjDAsPHEZZCw8PFQUVGEZZFRYFAQA/PysREgA5GC85KxEAMxESARc5ETMzETMxMBMQEjckNxcEBwYGBzM2NjMyEhUQACMiAAUgERAhIgYGBxASd9TmAR7aH/6llZGRBww+xGvK4v766uf++gH8ATH+60yNdSCmApEBaAGTMj0mkjoiIfbUVGD++uj+//7fAWLXAYUBcz9oN/75/u0AAwCwAAAETARIAA4AFgAfAElAJhwUFAsXAA8HBwADCwQgIQQcExwTRlkcHAsMDBtGWQwPCxRGWQsVAD8rABg/KxESADkYLysREgA5ERIBFzkRMxEzETMRMzEwARQGBxUWFhUUBiMhESEgAzQmIyERISADNCYjIREhMjYEKXtvjIHh2P4dAeEBmIOHnP7TATEBHx97ff7HARmafgM1a28TCRN+b5mmBEj9AllR/pcCmlBD/stMAAABALAAAANEBEgABQAdQA4CAwADBwYEAUZZBA8DFQA/PysREgE5OREzMTABIREjESEDRP4SpgKUA7r8RgRIAAIAKf6FBGgESAANABMAQ0AkBAUTBxAKDgwBAAAMCgcFBRQVChBHWQoPAQUiEwwGAwZGWQMVAD8rEQAzMxg/Mz8rERIBFzkRMxEzETMRMxEzMTABIxEhESMRMzYSEyERMyERIwYCBwRoof0CoFaGmAMCK53+w/YNkWz+hQF7/oUCCrYB6gEZ/EcDNt7+OZEA//8Ac//sBBIEXAIGAEgAAAABAAQAAAXfBEYAEQA8QB8CCQkRCgYEBQoODw0HExIRCwgFAg4ODQMADw8KBw0VAD8zMz8zMxI5ETMzMzMzERIBFzkRMzMRMzEwATMRATMBASMBESMRASMBATMBAqSZAcW2/jYB8cD+Hpn+H78B8P43tgHDBEb97QIT/e39zQIr/dUCK/3VAjMCE/3tAAEARP/sA38EXAAiAE1AKwINHhMTDQ8hCBgGIyQQIiEiIUZZDyIfIgILAyIiFgoWG0Z
ZFhYKBEZZChAAPysAGD8rERIAORgvX15dKxESADkREgEXOREzETMxMAEgNTQjIgYHJzYzMhYVFAcVFhYVFAYjIic1FjMyNjU0ISM1AYEBN/xNfmY7qsm92s1+dPXY7YG3u5CT/smYAoGsohwqh0ybhrg5CCWJZ5ipR5hWY12/jQABALAAAARiBEgADQA0QBkIBAcHBgsDAwwGDA8OAwoMBA0PDBUHFQQPAD8/Pz8REjk5ERIBOTkRMxEzETMRMzMxMAERBwcBMxEjETc3ASMRAUwHAwJRz5sDBf2wzwRI/Um2OQOm+7gCnoSC/FwESAD//wCwAAAEYgYMAiYB0gAAAQYCNj0AAAizAQ4RJgArNQABALAAAAQMBEgACgAtQBYKBgYHAwECBwQMCwIKBQoHAAgPBAcVAD8zPzMSOTkRMxESARc5ETMRMzEwATMBASMBESMRMxEDL7b+JwIAwv4MpqYESP3v/ckCK/3VBEj96wABABD/8gPhBEgAEAAtQBgBAAMPCg8AAxIRDwNGWQ8PBwxHWQcWARUAPz8rABg/KxESARc5ETMRMzEwISMRIQICBiMiJzUWMzISEyED4aj+txtgmXY2IBYcc4gjAoEDuv6c/l7CDHsGAeYB7wABALAAAAUvBEYAFAA1QBkDBgYFEg8PEAUQFhUHDgAOCwMRDwYQFQsVAD8/Mz8zEjk5ETMREgE5OREzETMRMxEzMTAlNzcBMxEjEQcHASMBJicRIxEzARYC6R8rASnTkxQ6/uWL/uU1FJTLAR8roF12AtP7ugOJOpn9SgK4hkv8dwRG/UluAAEAsAAABGIESAALADlAHgIGBgUBCQkKBQoNDAEIRlkvAT8BAgEBCgMLDwYKFQA/Mz8zEjkvXSsREgE5OREzETMRMxEzMTABESERMxEjESERIxEBVgJmpqb9mqYESP41Acv7uAHu/hIESP//AHP/7ARiBFwCBgBSAAAAAQCwAAAESARIAAcAI0ARAAEFBAEECAkCB0ZZAg8FARUAPzM/KxESATk5ETMRMzEwISMRIREjESEBVqYDmKj9tgRI+7gDuAD//wCw/hQEdQRcAgYAUwAA//8Ac//sA4sEXAIGAEYAAAABACkAAAOTBEgABwAkQBICAwADBQMICQEFBgVGWQYPAxUAPz8rEQAzERIBFzkRMzEwASERIxEhNSEDk/6cpv6gA2oDuvxGA7qO//8AAv4UBAYESAIGAFwAAAADAHH+FAVGBhQAEQAYAB4ATEAnEgkcDwQEFQwFGQAABQkDHyANABsWDBZGWQ8MEBwVBhVGWQMGFgUbAD8/MysRADMYPzMrEQAzGD8REgEXOREzETMzMxEzMxEzMTABFAAHESMRJgA1NAA3ETMRFgAFFBYXEQYGBRAlETY2BUb+5f6k+P7gAR//nvsBHvvZsMC5twN7/pO+rwIl+f7ZFf4kAdwTAS70+QEmFAG8/kQX/tTwwNoSA1QRz8gBfyf8rhPa//8AJwAABAgESAIGAFsAAAABALD+hQTdBEgACwAyQBkGAwcKAQAACgMDDA0IBA8KBgMGRlkDFQEiAD8/KxEAMxg/MxESARc5ETMRMxEzMTABIxEhETMRIREzETME3ab8eaYCRqab/oUBewRI/EcDufxHAAEAnAAABC0ESAASAC1AFgYKCgkBEQkRFBMDDkZZAwMKBxIPChUAPz8zEjkvKxESATk5ETMRMxEzMTABERQzMjY3ETMRIxEGBiMiJjURAULbW6ZppqZps3GkugRI/nDAOEMB1fu4AfBIO6yTAZwAAQCwAAAGbwRIAAsAMUAYCAUACQEEBAkFAwwNCgIGDwAIBQhGWQUVAD8rEQAzGD8zMxESARc5ETMRMxEzMTAlIREzESERMxEhETMD4QHmqPpBpgHlpo8Dufu4BEj8RwO5AAABALD+hwcKBEYADwA7QB4MCQANAQQHBgYEDQkEEBEOAgoPBAAMCQxGWQkVByIAPz8rEQAzMxg/MzMREgEXOREzETMRMxEzMTAlIREzETMRIxEhETMRIREzA+EB5qadqPpOpgHlpo8Dt/xJ/fgBeQRG/EkDtwAAAgApAAAFHQRIAAwAFAA9QCAAEhIIDQQECAoDFRYAEUZZAAAICwsKRlkLDwgSRlkIFQA/KwAYPysREgA5GC8rERIBFzkRMxEzETMxMAEhMhYVFAYjIREhNSEBNCYjIREhIAItATng19/c/iX+ogIEAkx8nf7NATkBEwKDmpumqAO6jvz8XVP+lwAAAwCwAAAFeQRIAAoADgAWAD9AIAAQEAgEEwwLCxMIAxcYDBUAD0ZZAAAIDQkPCBBGWQgVAD8rABg/MxI5LysAGD8REgEXOREzETMRMxEzMTABITIWFRQGIyERMwEjETMBESEgNTQmIwFWASvRydXP/jmmBCOmpvvdARkBCHqTAoObmqWpBEj7uARI/az+l7lcVAACALAAAARMBEgACQASADJAGQ8DAAsLBwMHFBMACkZZAAAHCA8HC0ZZBxUAPysAGD8SOS8rERIBOTkRMxEzETMxMAEhIBEUBiMhETMRESEyNjU0JiMBVgFSAaTb0/4SpgFAhIyBlAKD/suirARI/az+l1xdW1UAAQA5/+wDfQRcABoAREAmDAkJGBgKEgIEGxwLCkZZDwsfCwILAwsLABUVD0ZZFRAABkZZABYAPysAGD8rERIAORgvX15dKxESARc5ETMRMzEwBSInNRYWMzI2NyE1ISYmIyIHJzY2MyAAERAAAVandjyMW669Cv3VAikQqaFnly83pFABAAEK/t8UOZMXJLq5jaygNowaI/7b/uz+8/7WAAIAsP/sBjMEXAASAB4AUUAtDAgICRMNBhkAAAYJAx8gEBxGWRAQDAdGWQ8MHwwCCwMMDAkKDwkVAxZGWQMWAD8rABg/PxI5L19eXSsAGD8rERIBFzkRMxEzMxEzETMxMAEQACMiAichESMRMxEhNjYzMgABFBYzMjY1NCYjIgYGM/7/4NX6Dv7hpqYBIRT8z9wBAfzukqGelZKhoZICJf7z/tQBC/f+EgRI/jXk+/7P/vrT29XZ0tjYAAIAJQAAA8EESAANABQAPUAgEQsLCg4FAQUCCgQWFQ0QRlkCCA0NAQgIE0ZZCA8LARUAPzM/KxESADkYLxI5KxESARc5ETMRMxEzMTAzIwEmJjU0NjMhESMRIQEUISERISLnwgE7f4fKtQHopv7r/vYBFAEL/tPyAc8coXqWrPu4AbYBTr4Bcv//AHP/7AQSBdMCJgBIAAABBgBqCAAACrQDAjARJgArNTUAAQAU/hQERAYUACcAZkA6HRsXDw8UEAclJRkCEBIFKCkeHSELRlkaEhMSR1kXEw8THxMvEwMJAx0hExMhHQMQFQAQFQAFRlkAGwA/KwAYPz8SFzkvLy9fXl0RMysRADMrEQAzERIBFzkRMxEzMxEzMzMxMAEiJzUWMzI1ETQmIyIGFREjESM1MzUzFSEVIRUUBzM2NjMyFhURFAYDL080OjeBeoKtnaicnKYBkf5vCAoxtXTJyYn+FBmJFK
oDUoaEvNP95wTbf7q6f8RUOE9bv9L8tpyq//8AsAAAA0QGIQImAc0AAAEGAHbxAAAIswEPESYAKzUAAQBz/+wDqgRcABkAREAmDxISAwkYEQMEGhsPEkZZDw8fDwILAw8PAAYGDEZZBhAAFUZZABYAPysAGD8rERIAORgvX15dKxESARc5ETMRMzEwBSIAERAAMzIWFwcmIyIGByEVIRYWMzI3FQYCefj+8gET+1KeOTGPbaSqEAIp/dUJqqeMl3QUASMBEAETASogGY0zo6mNvrU7kzn//wBq/+wDcwRcAgYAVgAA//8AogAAAWYF3wIGAEwAAP///+wAAAIfBdMCJgDzAAABBwBq/rcAAAAKtAIBGREmACs1Nf///5H+FAFmBd8CBgBNAAAAAgAQ//IGQgRIABUAHQBMQCkJFAAbGwcWBAQHFA4EHh8AGkZZAAAMFBQJRlkUDwwRR1kMFQcbRlkHFQA/KwAYPysAGD8rERIAORgvKxESARc5ETMRMxEzETMxMAEzMhYVECEhESECAiMiJzUWMzISEyEBNCYjIxEzIAOw9NPL/kv+Zf7+KLWrOCAWHHOIIwJQAex9nuftARUCg5ua/rIDuv36/j4MewYB5gHv/PxbVf6XAAIAsAAABqQERgARABkASkAmDwsLDAETExAIFgUFCAwDGhsSCg8KRlkBDw8IEQ0PDBUIE0ZZCBUAPysAGD8/MxI5LzMrEQAzERIBFzkRMxEzMxEzETMRMzEwAREhMhYVECEhESERIxEzESERExEzIDU0JiMEAAEA2cv+Tv5g/gqsrAH6pvABFICZBEb+O5ma/rIB7v4SBEb+NwHJ/a7+l7lcVAD//wAUAAAERAYUAgYA6QAA//8AsAAABAwGIQImAdQAAAEGAHYzAAAIswEUESYAKzX//wAC/hQEBgYMAiYAXAAAAQYCNrcAAAizARYRJgArNQABALD+hwRGBEYACwAyQBkEAQoLBQgICwEDDA0LIgYCDwkBAQRGWQEVAD8rEQAzGD8zPxESARc5ETMRMxEzMTAhIREzESERMxEhESMCL/6BpgJKpv6PpgRG/EkDt/u6/ocAAAEAyQAABAgG4wAHACNAEQADBQYDBgkIBwRJWQEHAwYSAD8/xisREgE5OREzETMxMAERMxEhESMRA2ai/WuqBbYBLf46+uMFtgAAAQCwAAADRAWJAAcAJ0ASBQACAwADCQgGBAQBR1kEDwMVAD8/KwAYEMYREgE5OREzETMxMAEhESMRIREzA0T+EqYB7qYDx/w5BEgBQQD//wAbAAAHTAdzAiYAOgAAAQcAQwEXAVIACLMBGwUmACs1//8AFwAABiMGIQImAFoAAAEGAENzAAAIswEeESYAKzX//wAbAAAHTAdzAiYAOgAAAQcAdgGwAVIACLMBIwUmACs1//8AFwAABiMGIQImAFoAAAEHAHYBGwAAAAizASYRJgArNf//ABsAAAdMByUCJgA6AAABBwBqAWQBUgAKtAIBLwUmACs1Nf//ABcAAAYjBdMCJgBaAAABBwBqAM8AAAAKtAIBMhEmACs1Nf//AAAAAAR7B3MCJgA8AAABBwBD/5QBUgAIswEKBSYAKzX//wAC/hQEBgYhAiYAXAAAAQcAQ/9hAAAACLMBFxEmACs1AAEAUgHZA64CcQADABG1AAIEBQABAC8zERIBOTkxMBM1IRVSA1wB2ZiYAAEAUgHZB64CcQADABG1AAIEBQABAC8zERIBOTkxMBM1IRVSB1wB2ZiY//8AUgHZB64CcQIGAgMAAAAC//z+MQNO/9MAAwAHABxACwQACQUBAQgFBgIBAC8zLzMRATMRMxEzMjEwASE1ITUhNSEDTvyuA1L8rgNS/jGLjIsAAAEAGQPBAUQFtgAHABK2AQUICQAEAwA/zRESATk5MTATJzYSNzMGByUMFmI4e0IlA8EWWgEMef73AAABABkDwQFEBbYABwAStgUBCAkFBwMAP8YREgE5OTEwARcGAgcjEjcBNQ8aYjV6RiAFthZk/vdyAR3YAP//AD/++AFtAO4CBgAPAAAAAQAZA8EBRgW2AAcAErYCBgkIAwcDAD/NERIBOTkxMBMWFyMmAic33yVCey1tGA4Ftvv6XgEcZRYAAAIAGQPBArQFtgAHAA8AGkAMBAENCQQQEQAIAwwDAD8zzTIREgEXOTEwASc2EzMGAgchJzYSNzMGBwGWDzh6ex47Df3XDBZiOHtCJQPBFtcBCHP+32EWWgEMef73AAACABkDwQK0BbYABwAQABpADAkNAQUEERINBRAHAwA/M8YyERIBFzkxMAEXBgIHIxI3IRcGAgcjNhI3ATUPGmI1ekYgAicOGGA4fRpCDQW2FmT+93IBHdgWW/72emQBNF0A//8AGf75ArQA7gEHAgsAAPs4ACC3AQAHQA0NSAe4/8CzDAxIB7j/wLMJCUgHABErKys1NQABAHsAAAOJBhQACwBDQCEJAgIIAwoBAQcEAAQDBQQMDQAFBQsGBgcIAAEEBAoHAxIAPy4zMxEzPxI5LzMzETMREgEXOREzMxEzETMzETMxMAElEyMTBTUFAzMDJQOJ/qAxxDH+tAFMMcQxAWAD5x/7+gQGH6oeAaH+Xx4AAQB7AAADmgYUABUAdUA6DAcVEAQEDwoFFBEAAwMOCwkGEwEBBgUHBBYXAQgIAgcDBgYACRQLCxEOEwwMEgkODQcNBw0FDwAFEgA/PxI5OS8vEjk5MjIRMxEzMxEzETMzETMRMzMRMxESARc5ETMRMzMzMxEzMzMRMzMzETMzETMxMAElFSUTIxMFNQUDEwU1BQMzAyUVJRMCOQFh/p8xxjH+pgFaKyv+pgFaMcYxAWH+nysB5x+oHf6FAXsdqB8BKwEbH6geAXz+hB6oH/7lAAEApAH0Al4D4wALABO2BgAADA0JAwAvzRESATkRMzEwEzQ2MzIWFRQGIyImpHFsaXRzamtyAux5fnx7d4GDAP//AJj/4wWuAPIAJgARAAAAJwARAhIAAAAHABEEJQAAAAcAZP/sCTsFywAJABQAGAAkAC8AOwBGAFtAMAAQBQowQjY8GSsfJSUrPBVCChcQCEdIHDMzKD8ZAw0iOTktRA1EDUQXGAYXGAcSBwA/Mz8/Ejk5Ly8RMzMRMxEzPzMzETMREgEXOREzETMRMxEzETMRMzEwExQWMzIRECMiBgUUBiMiJjUQITIWJQEjAQEUFjMyNjU0JiMiBgUUBiMiJjUQITIWBRQWMzI2NTQmIyIGBRQGIyImNRAhMhbsU120tF1TAe2hnJWjATiYpQJp/NWUAysCoFNdW1lZW11TAe2im5SjATeWp/s4UV1bWVlbXVEB66KblaMBOJanBAKqqgFUAVKoqubn7t8ByfDb+koFtvwCq6mnraulpavm5u/dAcns3aupp62rpaWr5ubu3gHJ7AD//wCFA6YBPwW2AgYACgAA//8AhQOmArAFtgAGAAUAAAABAFIAdQIfA74ABgAaQAoEAgMGAgYIBwUBAC8vERIBOTkRMxEzMTATARcBAQcBUgFWd
/7fASF3/qoCJwGXRf6i/qFHAZcAAQBQAHUCHQO+AAYAGkAKAwAEAgACCAcFAQAvLxESATk5ETMRMzEwAQEnAQE3AQId/qh1AR/+4XUBWAIM/mlHAV8BXkX+aQD//wCY/+MDSgW2ACYABAAAAAcABAHBAAAAAf55AAACjwW2AAMAE7cABQIEAwMCEgA/PxEBMxEzMTABASMBAo/8eY8DhwW2+koFtgABAG0DIQLDBccAEgAmQBEAEgwICAkSCRQTBA8fAAkKHwA/zTI/MxESATk5ETMRMxEzMTABETQmIyIGFREjETMXMzYzIBURAkxOUHJbdGAOCkuRAQIDIQGkVEdpev6kAplYZfr+VAABAGIAAAQjBbYAEQBLQCgOAAQECQULEAIFBwUSEwMHCAdOWQAIDhFMWQgOCA4FCgoNTFkKBgUYAD8/KxESADk5GC8vKxEAMysRADMREgEXOREzMxEzMzEwASEVIREjESM1MxEhFSERIRUhAbgBNP7MprCwAxH9lQJE/bwBi4H+9gEKgQQrl/3plwABAEQAAARIBckAJQBwQEANCRERIh4aCw8VAg8aHCAXByYnEBwdHE5ZDR0MICEgTlkJIQ8hHyE/IU8hBAkDHSEdIRcAFxRMWRcYAAVLWQAHAD8rABg/KxESADk5GC8vX15dETMrEQAzETMrEQAzERIBFzkRMxEzMzMRMzMxMAEyFwcmIyIGFRUhFSEVIRUhFRQGByEVITU2NTUjNTM1IzUzNTQ2ArDJnjyYk3p+AaT+XAGk/lxBSgMb+/zOyMjIyOAFyVCDR4eBuoGmgSFkiCyajTDzI4Gmgc+yzQAAAwCa/+wF0QW2ABYAIQAqAGBANyIcHB0mFxAUFA0JAhIJFwsdBissGyJLWRATTlkDGwsQDg4QCxsDBR0eHipLWR4GHRgGAE1ZBhkAPysAGD8/KxESABc5GC8vLy8vKysREgEXOREzMxEzETMRMxEzMTAlMjY3FQYjIiY1ESM1NzczFTMVIxEUFgEUBCEjESMRISAWATMyNjU0JiMjBU4iVgs8bm2BnZ0+Yt3dNP6R/uv+9kClAQYBAP79oTTIuay3UnUOBH0eiIoBz1BFv9OB/kdNUgOX4+r9wQW20/3ukaKRjgAAAQA//+wEiQXLACYAcUA/HRcfFhYaCwIHBxokEQQKGhcGJygLFxgXTlkIGAUdHh1OWQIeDx4fHi8eAwkDGB4YHhMiIgBMWSIHEw5MWRMZAD8rABg/KxESADk5GC8vX15dETMrEQAzETMrEQAzERIBFzkRMxEzMxEzETMRMzEwASADIRUhBxUXIRUhFhYzMjcVBiMiAAMjNTMnNTcjNTMSADMyFwcmAxv+wU8B/v30AgIBz/5BJcuqnJmSq+3+3y6mmAICmKQnASTtyaVHpgU1/m2BOUAtgbTFQpZBAQ0BAYEqLFCBAQUBJGGLVgAEAI3/+AYKBcEAAwAPABcAKwBFQCQlGyAqEAoUBAQACioCGwYsLSMeBhIHGBYNJxgNGA0YAgMGAhgAPz8SOTkvLxEzETM/Mz8zERIBFzkRMxEzETMRMzEwAQEjAQEUBiMiJjU0NjMyFgUUMzI1NCMiJSImNTQ2MzIXByYjIhUUMzI3FQYFH/zVlAMrAX+plIuqp5SNqv4VsrCwsv3Kpra8q2hYIVFQ4NxiWk4FtvpKBbb7mJ+3uZ2euLqc7u7r27GhqLMjZx/u6yFlJQACAHf/7AOcBcsAHAAkAD1AHyMaGg8JHRYDFgkMBCUmIw8NGQoFDBMCDAIMBh8TAAYALzMvMxI5OS8vERIXORESARc5ETMRMzMRMzEwJTI3MwYGIyImNTUGBzU2NxE0NjMyFhUUAgcRFBYTNCMiBhURJAJ9rhJfCJmOlqBgYE5ylod1h86vUq5/Qz4BAG/VprK1qfMjFnEVJgHyip+hirn+0Er+5Wh7BCvCVmz+S4kAAAQAyQAAB8MFtgAPABsAJwArAF9AMQkGBgcBDQ0AHBYiEBArKBYABwYsLR8TJRkLKBMDGQgTGRMZKAgoKUpZKBIOCAMBBxIAPzM/Mz8rERIAOTkYLy8REjkREjkRMxEzERIBFzkRMxEzETMRMxEzETMxMCEjASMSFREjETMBMyY1ETMBFAYjIiY1NDYzMhYFFBYzMjY1NCYjIgYDNSEVBMe7/UwIEJfCAqoIDpgC/KGTi6Khk4ui/iJRXVtPT1tcUlYCAATL/uBs/MEFtvs69YoDR/y3o7i7oKO1u51ydnVzc3Bw/SCHhwACACUC5QWFBbYABwAYAE9AJwABDwwMDREUFBMTDQYBAwUZGhcWCQoKEQ4OBAcDAwQQCAgUDQEEAwA/xDIyOS8zETMRMxEzETMzETMzMxESARc5ETMRMxEzETMRMzEwASMRIzUhFSMBAyMXESMRMxMTMxEjETcjAwFxe9ECH9MCWMkIBne7xMu0fwYI0wLlAmdqav2ZAi+B/lIC0f3RAi/9LwGkif3TAP//AFAAAAX0Bc0CBgF2AAAAAgBm/90EiwRIABcAHwA0QBofDg4EGAwMFQQDICENFC8fPx8CHx8RHAgRAAAvMi8zEjkvXTkzERIBFzkRMxEzETMxMAUiJgI1NDY2MzIWEhUhERYWMzI2NxcGBhMRJiYjIgcRAnmd8YWK9JWY84f8xTGmUoO3UUhi2ZMyo1iteiOTAQWdq/+Mjv79pf6cNUZpgSmbfAKLARU1QnX+6f//AEf/7AXzBbYAJwIXAlwAAAAmAHv7AAEHAkADYP2zAAu0BAMCGRkAPzU1NQD//wAg/+wGCAXJACcCFwKiAAAAJwJAA3X9swEGAHX/AAALtAEDAg4ZAD81NTUA//8AR//sBgQFtgAnAhcCnAAAACYCPQwAAQcCQANx/bMAC7QEAwIsGQA/NTU1AP//AGr/7AYABbYAJwIXAkYAAAAnAkADbf2zAQYCPzEAAAu0AQMCDhkAPzU1NQAAAgBm/+wENQXHABoAKABBQCImBx8PDwAAFAcDKSoLIkdZDgQLCxgEGBFGWRgDBBtGWQQWAD8rABg/KxESADkYLxI5KxESARc5ETMRMxEzMTABEAIEIyImNTQSNjMyFhc3ECEiBgc1NjYzMhIBMjYSNyYmIyIGBhUUFgQ1p/7sray7iOiXYZIrBP7mPpAwL5tK0tj9ol+meBYZgFBlpWVlA6b++v416cnAqQEzoV1LWgGVLCGfFyX+7PvGkAEDlmFshPqAdoIAAgAnAAAEbQW2AAUADAAoQBMJBQoEBQQODQYFAQUJSVkFEgEDAD8/KxESADkREgE5OREzETMxMDcBMwEVIQEGBwEhASYnAc+mAdH7ugIhPSj+/ALR/v5EaAVO+rBmBPThefz+AvnKAAABAMn+EAUhBbYABwAjQBEABwMEBwQJCAUCSVkFAwAEGwA/Mz8rERIBOTkRMxEzMTABESERIxEhEQR3/PyqBFj+EAcN+PMHpvhaAAEATP4QBN0FtgALADFAGgcJCQMACAIKBgIABAwNBAdJWQQDAAlJWQAbAD8rABg/KxESARc5ETMRMzMRMzEw
EzUBATUhFSEBASEVTAJ3/ZkEQPywAkP9pAOq/hBrA5wDM2yX/Pz8jZgAAQBoAo0EKQMXAAMAFUAJAgAFBAEAUFkBAC8rERIBOTkxMBM1IRVoA8ECjYqKAAEAJf/yBLwGmAAIABxACwgKAwkDBgQEAQgBAC8vEjkvOTMRATMRMzEwBSMBIzUhEwEzAm9//um0ASHrAgKJDgMOh/1UBb0AAAMAdwGTBS0EDAAVACEALQAzQBgfDCsAACUZDAQuLyIcHBEGCRMPKBYWAwkALzMzETMvMxI5OTMRMxESARc5ETMRMzEwARQGIyImJwYGIyImNTQ2MzIXNjMyFgEyNjcmJiMiBhUUFgEiBgcWFjMyNjU0JgUtp4BdmUE8mViDqKiDtXp8uYWi/H1CbTYybUhMZGECoUJtNzNuR0xkZQLPg7lqdGhxrY6Gs9vXr/67W2RhXWlXU2oBeVxiYV5rVFVpAAEADP4UAvgGFAAUABxADAgSAhINAxUWEAsFAAAvMi8zERIBFzkRMzEwATIXFSYjIhURFAYjIic1FjMyNREQAn1PLDE+sKWjSjs9OrYGFBCJFvP64bC7E4cW8wUfAWoAAAIAYgGHBC0EHwAXAC8AcEBAKA8bAw8DMTAnHh4YUFkPHh8eLx4DCQMeKkAqJFBZGypADwYGAFBZDwYfBi8GAwkDBhJAEgxQWQMAEhASIBIDEgAvXcQrABoYEM1fXl0rABAYxBrexCsAGhgQzV9eXSsAEBjEERIBOTkRMxEzMTABIgYHNTYzMhYXFhYzMjY3FQYjIiYnJiYDIgYHNTYzMhYXFhYzMjY3FQYjIiYnJiYBUDZ/OWyUQ3BYTVstNYA2ZZlDb1hJWzE5gDVqlkV0UkVfMTeBM2SaRXZPVFUCAEA5lm4cJSEZQjmXbR0lHhkBlkQ1lW0gIh0aQjeWbiAhIhgAAAEAaACmBCkFAgATAEZAJgUBEAsLCQoOBAATAQgUFQ0FBgVQWQoIDwYBCQMGDgIBAlBZEhEBAC8zxCsRADMYL19eXcYzKxEAMxESARc5ETMRMzEwASE1IRMhNSETFwchFSEDIRUhAycBff7rAVR//i0CE4d9bQEX/qqBAdf96YN9AcGJARCJAR855on+8In+5Tf//wBoAAEEKQTZAiYAHwAAAQcCKwAA/XQACbMBAAcSAD81NQD//wBoAAEEKQTZAiYAIQAAAQcCKwAA/XQACbMBAAcSAD81NQAAAgBvAAAEPQXDAAUACQAgQA0IAAYDAAMKCwkHAgUCAC8vEjk5ERIBOTkRMxEzMTATATMBASMJA28BwkgBxP48SAFi/sP+wwE9At8C5P0c/SEC4QIT/e397AD//wAdAAAEHAYfACYASQAAAAcATAK2AAD//wAdAAAEDAYfACYASQAAAAcATwK2AAAAAQDbBNkDvgYMAA0AGEAJCwMPDgoEgAcAAC8yGswyERIBOTkxMAEiJiczFhYzMjY3MwYGAki5qgqcCVtxZ2MLnQyyBNmPpGhSWGKelQAAAf+R/hQBVgRIAAwAHUANCwgIDg0JDwAFRlkAGwA/KwAYPxESATkRMzEwEyInNRYzMjY1ETMRECtfO0VDTkmm/hQZhxRVVwT8+xD+vAAAAQGJBM0CdQYUAAkAE7YJBAoLBIAJAC8azRESATk5MTABNjY3MxUGBgcjAYkTJwqoC1gvWgTlN6dREjO8RgABAXH+OwJv/4MACQATtgkECgsJgAQALxrNERIBOTkxMAE2NjczFQYGByMBcRwzB6gLYjda/lRAujUSM8FCAAEBgQTZAn8GIQAJABO2CQQKCwmABAAvGs0REgE5OTEwAQYGByM1NjY3MwJ/HTUGpg5jMVwGCD3BMRM9vzkAAgAnAjkCngXHAAsAFQAgQA4GDAARDBEXFgkTHwMOIQA/Mz8zERIBOTkRMxEzMTATFBYzMjY1NCYjIgYFECEiJjUQITIWsFJeXlZWXl5SAe7+xJ6dATuengQAqKalq6qkpan+N+zdAcXoAAIAFAJKArQFvAAKABQAPEAfFAULBwMDCQIAAgUDFRYBBQUJDxQfFAIUFAMOBx8DIAA/PzMSOS9dMzMRMxESARc5ETMzETMzETMxMAEjFSM1ITUBMxEzITU0Nw4DBwcCtH2R/m4BmIt9/vIGBRgeHguoAxTKymUCQ/3Nw4ZLDCctLRH2AAEAOwI3AokFqgAdACtAFRADHBcJFxoDBB8eEwAABhsYHg0GIQA/Mz8zEjkvMxESARc5ETMRMzEwATIWFRQGIyImJzUWFjMyNjU0JiMiBgcnEyEVIQc2AUiRsKqmSospOIw2X25tZjlMHzshAe/+gxQ+BGiPe4ybHxeDIiZTWU5YEQgpAaBo5gwAAAIAKQI5AqIFxwAXACMANkAcGxIhCwAABhIDJSQeCxUADxAPAg8PAxgVIQgDHwA/Mz8zEjkvXRI5MxESARc5ETMzETMxMBMQNjMyFxUmIyIGBzM2NjMyFhUUBiMiJgUyNjU0JiMiBhUUFinb20oxNFONlgoIHXFVfZSmjZmtAURRY1hWVXBqA8MBBf8PchKZpis7lH6QpNJjXWNPW1o7WXwAAAEAOQJKAo8FtgAGABxADQEFBQACAwcIAgMeACAAPz8zERIBFzkRMzEwEwEhNSEVAaIBXv45Alb+oAJKAvh0XvzyAAMAMwI5ApMFxwAVACIALQA/QCIWDSYTKwMcBwcDBRATDQYuLwUQICALKRspAikpGQohIwAfAD8yPzM5L10zEjk5ERIBFzkRMxEzETMRMzEwATIWFRQHFhUUBiMiJjU0NjcmJjU0NgMUFjMyNjU0JicnBgYTIgYVFBYXNjU0JgFkfJeUsKWKkp9JVUo5nTVUVlpUXVEcSEasREtEUYxOBcd2aIJMSp5xiYB0RXQuLl1EZn79ZjxJSTw/TxwKIlQB7zw5L0chNmE5PAACACMCOQKcBckAFgAiADxAHxoRIAoAAAURAyMkHQ4KCwsUDw4fDgIODgMXFB8IAyEAPzM/MxI5L10SOREzETMREgEXOREzMxEzMTABEAYjIic1FjMgEyMGBiMiJjU0NjMyFiUiBhUUFjMyNjU0JgKc2tRTMTFdARQVCiN0QYOZqYiYsP64UV9VV1RzZwRG/vL/D3QUAUYzNJKDiKXKW19XUV9VPmFyAAAWAFT+gQfBBe4ABQALABEAFwAbAB8AIwAnACsALwAzADcAOwA/AEMARwBTAFsAawB0AHwAiQD4QIdBQD08MTAPBQAMVE5YSHZrcGB6Z4WGRUQpKCUkFAoJFxeGBhI7G39nYDgYNy9rNCxIIx8gHAMRTgwZiosKACpCWlGGXHRcKUFGPmR1dWxFPYJ9VktrdmsmMiUxFQ0AQgFBPlw9bA0xMgNrDFxsa2tsXAMBLSwdHBkYExIPDDk4NTQhIAcGBAEALzMzMzMzMzMzMy8zMzMzMzMzMzMSFzkvLy8REhc5ETkSOTkROTkRMxEzETMRMxDEMsQyETMRMxI5ETMRMxEzEMTEMhEzETMREgEXOREzMzMzMzMzMzMRMxEzETMRMxE
zETMRMzMzMzMzMzMzMTATESEVIxUlNSERIzUBETMVMxUhNTM1MxEhNSEVITUhFQE1IRUBIxEzESMRMwE1IRUBIxEzATUhFTM1IRUBIxEzNSMRMwEjETMFFAYjIiY1NDYzMhYFFDMyNTQjIiUzMhYVFAYHFRYWFRQGIyMTMzI2NTQmIyMVFTMyNjU0IwEiJzUWMzI1ETMRFAZUAS/ABc4BMG35AG/ABQ7Dbf1JARH74QEO/vIBDgS3bW1tbfvCARD8MG9vAsABEHcBEfqob29vbwb+bW37n4d/f4eHf36I/nOHh4eHAeGsbXAuLD0ubV7Pe0IuJCovO0oxJVoBXjQcKxlWfWkEvgEwb8HBb/7QwfkCAS/CbW3C/tFtbW1tBv5vb/qoAQ4CAgEP+jttbQGmAQ4ESm9vb2/8LwEQeQEP/WgBEEmRnJyRkpuak8XFxGFDUzFCCAgORDVRWQFiIiAiHeOaKyVK/voKZghWAZL+cl9jAAADAFT+wQeqBhQAAwAeACoALkAZAQsXJQQeHxEDCSssKB4UDiIeDg4eIgMCAAAvLxc5Ly8vETMRMxESARc5MTAJAwU1NDY3NjY1NCYjIgYHFzYzMhYVFAYHBgYVFQMUFjMyNjU0JiMiBgP+A6z8VPxWA+ssQWdJu6VPukdSoFo/PjFIVDsbR0ZCSUhDSEUGFPxW/FcDqfsvMkExUn5Yh5o4KrJQOi81SzZEcEo7/u0/SEk+QElI////kf4UAlcGIQImAjcAAAEHAUz+qQAAAAizARgRJgArNf//ABkDwQFEBbYCBgIHAAAAAgAK/+wE3wYrAC0ANgBmQDkbBxcLNCUuHx8rAi0CJQsHEgY3OBQOR1kAIS4hR1krLg8uHy4CCQMULhQuBSgoMUZZKAEFHUZZBRYAPysAGD8rERIAOTkYLy9fXl0RMysRADMrERIBFzkRMzMRMxEzETMRMzEwARYVEAAhIBE0NzY1NCYjIgYHJzYzMhYVFAcGFRQzIBE0JyYkJjU0NjMyABMzFSUmAiMiBhUUBARWBP7g/v3+dxAPJCAZNg8hU19YXQ8Q6QF3BN/+yaC2qNABACqP/scct3tdYQETA04uQf6f/m4BWDl7ehcvIw8JdiddXSODhDrPAnA/LAJpvIOQo/7N/teBgdMBAF9LjZoAAQAAAAAEewXDABUAKEAUERIHEhQDFhcAEhQDEhIFCkpZBQQAPysAGD8/EjkREgEXOREzMTABEhI2NjMyFxUmIyIOAwcRIxEBMwI5eo1NXDowKBofKDtWfGUfrP4jugLNASMBN2wwD4cGOKH87FX94wIvA4cAAAIAEv/sBncESAAUACkATEAnGAMSISEeJw0KDR4DBgUqKxMfHwAIFQsGCAZGWQgPJBsAG0ZZEAAWAD8yKxEAMxg/KxEAMzMREjkYLzkREgEXOREzETMSOREzMTAFIiY1NBMhNTchFSMWFRQGIyInIwYBBgIVFBYzMjY1NTMVFBYzMjY1NCcCKbrHh/7jjgXX+nXIud1ECET+zz9CbHVdbKJrXXVtbxTn8PABB0pEjvz78Oe2tgPOhP7+Z66oj328vHqSqa3+7wD//wDJAAAGcQd1AiYAMAAAAQcAdgGcAVQACLMBHQUmACs1//8AsAAABssGIQImAFAAAAEHAHYBzQAAAAizAS0RJgArNf//AAD91QUQBbwCJgAkAAAABwJbATUAAP//AF791QPNBFoCJgBEAAAABwJbAMcAAP///t//7AXSBc0AJgAyFAABBwJc/kcAAAAJswMCGgMAPzU1AAACAHX91QI1/4MACwAXAB5ADBIGDAAGABgZFQMPCQAvM8wyERIBOTkRMxEzMTABFAYjIiY1NDYzMhYHNCYjIgYVFBYzMjYCNX1mZXh4ZWV+bkIzM0I8OTVA/q5heHViYnV2YTk8PDk4PT0AAgCYBGgCzwXFAAgAFwAeQA4OCQMIDBMJBRgZAgsIFQAvxNzGERIBFzkRMzEwATY3MxUGBgcjJTQ3FQYVFB4CFRQjIiYBsEYcvSl3MU7+6O15HyUfXTdDBIe1ehROrDl2oz1IKTUUExAaHEpEAP//AB0AAAbTBh8AJwBJArAAAAAmAEkAAAAHAEwFbQAA//8AHQAABsMGHwAnAEkCsAAAACYASQAAAAcATwVtAAAAAgB9/+wGZAYUABUAIQA8QB8WBg8RERwAABQLBgQiIxQLAwkJH0lZDwkEAxlJWQMTAD8rABg/xisREgA5ORESARc5ETMzETMRMzEwARAAISAAERAAISAXPgI1MxcGBgcWARASMzISERACIyICBbz+nf7G/r3+oQFhAUMBRbMyOhu2Dh2DaGD7dfr08/b18vP9At3+nv5xAYkBagFoAYbXDENmaRabrSew/v7+1v7OATEBKwEnATH+0QAAAgBz/+wFGQTwABYAIgA8QB8XBxASEh0AABUMBwQjJBUMAwoKIEZZEAoQAxpGWQMWAD8rABg/xisREgA5ORESARc5ETMzETMRMzEwARAAIyImAjUQADMyFz4CNTMXBgYHFgUUFjMyNjU0JiMiBgRi/vLuk+R8AQzu2YkzOhq0Dx95Zkf8vZ6tr52fr62cAiX+9P7TigECrQEMASuND0FjbhecryaKudPb29PS2NgAAQC6/+wGewYUABsAM0AYBQcHAQsUEQsRHRwKAQ4bBRIDDhdJWQ4TAD8rABg/xjMSOTkREgE5OREzETMzETMxMAEVPgI1MxcGBgcREAAhIAA1ETMRFBYzMjY1EQUZOkYftQ4hrJX+4f74/vT+1KrMxrjBBbbGCD5wbha2uBn9jf7+/uoBH/0DrvxGt8TBvAO4AAABAKT/7AWWBPIAHQBEQCIBHA0PDxMUBwcKExwTHh8VFgoSFgMUDQgdDxkERlkZFhQVAD8/KwAYPzPGEhc5ETMREgE5OREzMxEzETMRMxEzMTABERQWMzI2NREzFTY2NTMXBgYHESMnIwYGIyImNREBTHqCrJ+mUkqyDyCwjYkYCTS1b8vIBEb9O4aEvNUCPnkLgJoXur8O/KyTUlW+0QLLAP///FME2f3cBiEABwBD+soAAP///Q0E2f6WBiEABwB2+4QAAP///BkE2f8BBd0ABwFS+xEAAAAB/QgEuP5zBo8AEQAeQAwCBQUNDQgAABMLEAQAL8wyEQEzETMzEjkRMzEwARQHByMnNjY1NCYjIgc1NjMg/nOmCmkMVk5DST4gJkUBAAXXjCJxsA4yKyspBmQKAAH9O/6g/gL/fQALABG1BgAADQkDAC/NEQEzETMxMAU0NjMyFhUUBiMiJv07OyooOjooKjvyOTY2OTc3NwD//wDJAAAD+AdzAiYAKAAAAQcAQ//YAVIACLMBDQUmACs1//8AywAABVIHcwImAbIAAAEHAEMAaAFSAAizAREFJgArNf//AHP/7AQSBiECJgBIAAABBgBDtwAACLMCHBEmACs1//8AsAAABGIGIQImAdIAAAEGAEPcAAAIswEPESYAKzUAAQCF/+wHkQXJADEARUAkIhYqJy8JCQQnGxYFMjMAHxkfSV
kQKCgTBhkELCUTJUlZDBMTAD8zKxEAMxg/MxI5LzkrEQAzERIBFzkRMxEzETMxMAEiBgcnNjMyABEQACMiJicjBgYjIAAREBIzMhcHJiYjIgIREBIzMjcRMxEWMzISERACBaQ8Xi1FfpbkAQH+5f9srFMIUKlr/wD+5f/kmXxGLV08k6XPu4tmqmaOu86lBS8pH5JQ/oj+rf6N/mEtMzIuAZsBdwFTAXhQkh8p/tf+9v7T/rJMAcn+N0wBSwEwAQsBKAABAAAAAAYdBEgAHQAoQBYXAA0OBQUeHxsVDQASCgQEFg4FDwQVAD8/MzMSFzk/ERIBFzkxMAEGBgMjATMTFhczNjYTAzMAFhczNhIRMxACByMDJgMnChSz1f5/rPYgLggTSo6ssgEJLQoIrZmmw9u2fSEByRoz/oQESP1JXb01owEkAdX8/5AsuAGzAVL+lv4H5QFaXAACABcAAAT8BhQAEQAaAExAKAgEEhIBDxYLCwYPAAQbHAcRABFJWQQACBpJWQAIAAgPAgAPEkpZDxIAPysAGD8SOTkvLysRADMrEQAzERIBFzkRMxEzMxEzMzEwEyERMxEhFSERMyARFAQhIREhATMyNjU0JiMjFwE/rAGi/l7JAjH+9/77/mj+wQHr1cC1utq2BPoBGv7mlP7g/mTQ2gRm/CuJkIp6AAACABcAAAScBScAEQAZAEdAJgQAExMPCxYHBwILDQQaGwMNDg1GWQQSRlkEBAsQAA4PCxNGWQsVAD8rABg/M8YSOS8rKxEAMxESARc5ETMRMzMRMzMxMAEhFSERISARFAYjIREjNTM1MxERISA1NCYjAagBWP6oAT8Btd/c/iHr66YBMQEfh5wESIz+xf7NpqgDvIzf/M3+l7lcVAABAMn/7AchBcsAIABKQCkXExMUBhgdDAUYERQGISIbAElZGwQGEhcSSVkDFxcUFQMUEg4JSVkOEwA/KwAYPz8SOS8zKxEAMxg/KxESARc5ETMRMxEzMTABIgQHIRUhEgAzMjcVBiMgAAMhESMRMxEhEgAlMhcHJiYFj+P+/B8Cv/09CAEJ95rCmN7+wf6lCP6iqqoBZB4BcQEw1bZIZJ0FM/rxlv7v/uI3lTkBcAFU/VAFtv2SATMBTgJckjAmAAABALD/7AWcBFwAIQBZQDIWGRkKAwkFBQYQIBgDBgUiIw0TRlkNEBkECQRGWRYPCR8JAgsDCQkGBw8GFQAcRlkAFgA/KwAYPz8SOS9fXl0zKxEAMxg/KxESARc5ETMRMxEzMxEzMTAFIgAnIREjETMRITYkMzIWFwcmIyIGByEVIRYWMzI2NxUGBHfr/vQL/uGmpgEhGAEN31GaNjKKZaOnEAIY/eYJqaQ9d2JuFAEK+P4SBEj+M+v2IBmNM6Sqjby1FiWTOQACAAAAAAVtBbYACwASADRAGwIDBwwDDQoFFBMBBQwFSVkQCAwMBwgDCwMHEgA/MzM/EjkvEjkrEQAzERIBFzkRMzEwASMRIxEjASMBMwEjASEnJicGBwOYlJyV/t+yAmieAme3/VwBTFI4HhhAAqr9VgKq/VYFtvpKAz/PkGRipAAAAgAKAAAEeQRIAAsAEgA1QBwFBgoMBg0DAQYUEwQIDAhGWRELDAwKCw8GAgoVAD8zMz8SOS8SOSsRADMREgEXOREzMTABASMDIxEjESMDIwEDISYmJyMGAqgB0azPcZdzzawB0SEBDys4IgkcBEj7uAHp/hcB6f4XBEj+LWyKalwAAAIAyQAAB14FtgATABoARkAlDgoKCwIDEhUDFAgHCwcbHAUBCQ4JSVkUGAwODgsQDAMTBwMLEgA/MzMzPzMSOS8SOTMrEQAzMxESARc5ETMRMxEzMTABIxEjESMBIwEhESMRMxEhATMBIwEhAiYnBgYFhY+ak/7jugEi/l+qqgHhAQaeAma8/WYBPnYcDBMjArD9UAKw/VACsP1QBbb9kgJu+koDSAE1Vi9DaAACALAAAAYUBEgAEwAZAE1AKxENDQ4FBgEZBhgLCg4HGhsIBAwRDEZZGBUTLxE/EQIREQ4TDw8PCgYCDhUAPzMzMz8/EjkvXRI5MysRADMzERIBFzkRMxEzETMxMAEBIwMjESMRIwMjEyERIxEzESETFyMGBgchBEYBzqrQcZhu0azR/t+mpgFexWgICiBZAQwESPu4Ae7+EgHu/hIB7v4SBEj+MwHNcyJf2QAAAgAUAAAFrgW2AB8AIgBLQCggAQ8QIR4eHRACAQcGJCMeASEfHyFJWQ4SHRJKWSICHR0YHwMQCBgSAD8zMz8SOS8zMysRADMrERIAOTkREgEXOREzETMRMzEwARUBHgIXEyMDLgIjIxEjESMiBgYHAyMTPgI3ATUFIQEFKf5adppkMoWuiSNEZVkbqhpbY0Egh7mIL2OVdv5lA779CgF7BbaF/hEGSIuk/jsByW9gJv1CAr4nX2/+NwHFn45JBwHvhZn+OQAAAgAMAAAFFARIACAAIwBOQCohAQ8QIh8YHx4QAgEHByUkHwEiICAiRlkRDhIeEkdZIwIeHhggDxAIGBUAPzMzPxI5LzMzKxEAMzMrERIAOTkREgEXOREzETMRMzEwARUBHgMTIwMuAiMjESMRIyIGBgcDIxM+AzcBNQUhAQSL/q5Xb0kxm6yFIjpUTAqZC0tSOCeHqoMYMEluV/6xAyD9tAElBEhp/qAHMFBp/nEBUFdHHP32AgoaQF7+rgFQPWlPMggBYGmM/sEAAAIAyQAAB8UFtgAkACcAYUA1IR0dHiYjDxACJyUBBwEnECIbIxgeCSkoIwEkJiQmSVkSDhwhHElZJwIhIR4kAx8DGBAIHhIAPzMzMz8/EjkvMzMrEQAzMysREgA5ORESARc5ETMRMxEzETMRMxEzMTABFQEeAhcTIwMuAiMjESMRIyIGBgcDIxM2NyERIxEzESEBNQUhAQc9/l14mWUtiKiKH0ZpXxisGV5kQiGHsoc3OP5SqqoC1/5oA8H9CgF7BbaF/g4GSJCc/jsByWhjKP1EArwoX2z+NwG+uDr9UAW2/ZIB6YWZ/jcAAAIAsAAABroESAAkACcAZ0A6IR0dHiYjDxACJyUBBwEnECIbIxgeCSkoIwEkJiQmRlkSDhwhHEZZJwIvIT8hAiEhHiQPHw8YEAgeFQA/MzMzPz8SOS9dMzMrEQAzMysREgA5ORESARc5ETMRMxEzETMRMxEzMTABFQEeAxMjAy4CIyMRIxEjIgYGBwMjEzY3IREjETMRIQE1BSEBBjH+rlhvSTCbrIUiOlZKCpoKS1Q3Joeqgy8l/s2mpgI1/rADIf20ASUESGn+ngcxTmn+cgFQVkYc/fgCCBs/XP6uAVB4KP4QBEj+NQFiaYz+xwABAD/+TgQ1BtEASwCEQE0AEyE/GUZGCj83QzwqHC0oEwtMTUkWSllJEzk0MQ8uHy4vLgMJAy4qQEMdHB0cSlkdHRA8KiokSlkqBAoJSVkKEBADSVkQIwwHSVkMIgA/KwAYPysAGBDGKwAYPysRADMSORgvKxESADkaGBDdX15dOcQyP
ysREgEXOREzETMRMzEwFxQWMzI3NjMyFxUmIyIHBiMiJjU0Njc2NjUQISM1MzI2NTQmIyIGByc2NyYnJzUzFhc2NjMyFxUmIyIGBxYWFRQGBxUWFhUUBAUGBvBXWWF4eEabR1CgRGlpabO42ejMtf5A2tHN4aKJartuVqi+OXUxe1yDXINAMjAYKyxvMLLBv6q6y/7l/uaKhok3MgcGJ6YzBQV9hX6BCQiKjQEMj5OEa4A3RXJyHEJ5NBs7iHNWDnEKUkcXvY+MuBoIGLKQ0NUJBTcAAAEAGf57A38FTgBGAINAThcpNgsuEBAgCwMOCD4yQDwpC0dIRD5BAAVHWQAPQR9BL0EDCQNBPiYaRlkjHUZZDjMyMzJGWSYjMzMjJgMgPj44RlkIPhAgIhMsR1kTFgA/KwAYPz8zKxESABc5GC8vLysREgA5KysAGBDUX15dxCsREgA5ERIBFzkRMxEzETMxMAEyFxUmIyIGBxYWFRQHFRYVFAYHDgIVFBYzMjc3MhcVJiYjBwYjIiY1NDY3JDU0JiMjNTMgNTQjIgYHJzY3Jic1MxYXNjYC+DMtGCkvZy16jNP48uFdbTBLWVZ6r30nFVQ3s4JckJ++tAFOnJ+UdwE3/EqPWDt8flxne0uMWIYFTg9wCk8+HIpruDkIR8qUqAMCFyosMSsFBSePExgFBXdwdH0DBL5hWo2soiIkhzcPdWIbNIluVf//AG0AAAXyBbYCBgF1AAD//wCk/hQFhwYSAgYBlQAAAAMAff/sBb4FzQALABIAGQBHQCUWEBAGFw8PAAYAGhsWEElZDxYBCwMWFgMJCRNJWQkEAwxJWQMTAD8rABg/KxESADkYL19eXSsREgE5OREzETMRMxEzMTABEAAhIAAREAAhIAABMhITIRISEyICAyEmAgW+/p3+xP69/qEBYAFEATsBYv1h5fcN/CsN+ejg+xMD0xH0At3+of5uAYsBaAFlAYn+cPxEAREBDP71/u4EtP7+/wD+AQQAAAMAc//sBGIEXAAMABMAGgBJQCcXEREHGBAQAAcAGxwXEUZZDxcfFwILAxcXAwoKFEZZChADDUZZAxYAPysAGD8rERIAORgvX15dKxESATk5ETMRMxEzETMxMAEQACMiJgI1EAAzMgABMjY3IRYWEyIGByEmJgRi/vLuk+R8AQzu5gEP/giepAr9aQmgoJyeDQKTD6ECJf70/tOKAQKtAQwBK/7O/U24v7q9A1itp6isAAABAAAAAAVIBcMAFQAgQBAGFhMXEQBKWREECgUGAwUSAD8/Ejk/KxEBMxI5MTABIgYHASMBMwEWFzY3Ez4CMzIXFSYE4TtOOf64xf3utAFSSCMgRqI7VG5ZKk84BTdntfvlBbb8VsePkN8CBr+YQRONFAABAAAAAAQ9BFIAFgAeQA8BFw8YDRJHWQ0QBQEPABUAPz85PysRATMSOTEwIQEzExIXMzYTEz4CMzIXFSYjIgYHAwGW/mqu4WQTCBdSYCVHW1QtHh0mLzoc+ARI/Zv+9GR2AQsBNXp7NAp/CFRc/N///wAAAAAFSAdzAiYCgAAAAQcDdgTXAVIACrQCASEFJgArNTX//wAAAAAEPQYhAiYCgQAAAQcDdgRkAAAACrQCASIRJgArNTUAAwB9/hQJogXNAAsAFwAuAERAJgwGEgAhLicYAAYGLzAlKkpZJRsdHBwDIBgPCRVJWQkEAw9JWQMTAD8rABg/KwAYPzMSOREzPysREgEXOREzETMxMAEQACEgABEQACEgAAEQEjMyEhEQAiMiAiUzExYXMzY2EzMBBgYjIic1FjMyNjc3BVT+uf7c/tf+vQFDASwBIwFF+93f2drd3Nja4QRvsPZOFAgLU+Sw/itFvIhMSjdCXnUjPQLd/qD+bwGLAWgBZgGI/nD+oP7X/s0BMQErASkBL/7SQf2Lz2Ys+wKD+yC2nhGFDGdZnP//AHP+FAh7BFwAJgBSAAAABwBcBHUAAAACAH3/hwYQBi0AEwAoAFFAKhQKJg0HESIiAxwfAAAcBxcKBSkqJCImDSZJWREPDQMcGhcHF0lZBQMHEgA/MzMrEQAzMxg/MzMrEQAzMxESARc5ETMRMzMRMxEzMxEzMTABEAAFBiMiJyQAERAAJTYzMhcEAAEUEhc2NjMyFzYSNTQCJwYjIicGAgYQ/tH++Bp3fBT+9P7RASsBEBR8eRYBDAEt+yHKvRFJNm4fvcrKvR9ucR+9ygLd/tL+cyxvbykBigE2ATEBhSxsbCz+c/7V9P7PKTAmVikBMfT0AS8nWFYn/tMAAAIAc/+TBM8EtAAXAC0AUEAqGAwPCSsbJRUDIwAAAyAbCQwGLi8oJSsPK0ZZFRIPECAeGwkbRlkGAwkVAD8zMysRADMzGD8zMysRADMzERIBFzkRMxEzMxEzETMRMzEwARQCBwYGIyImJyYCNTQSNzY2MzIWFxYSBRQWFzY2MzIXNjY1ECUGBiMiJicGBgTP4MwJQDg5PQnL5eDQCD45OEAJyuL8UH2JDDw1ZxiGfP78DT0zNTwMiX0CJen+3yU2LSs4JAEm5ekBICQ4Kis5Jv7c4bHSHyoiSh/SrwFgPiogICwf0QAAAwB9/+wHfwg7ABUARQBUAFVALkM3HysrASZGS1BIPAw3ClVWFQICBwcQDFJASDoiQDpASVkoOgQcFjQWSVkuNBMAPzMrEQAzGD8zKxEAMxgQ1hrc1M0yEjkvMxESARc5ETMRMzEwARUjIi4CIyIGFRUjNTQ2MzIeAjMBMjY3FhYzMhIREAIjIgYHJzYzMgAREAAhIiYnBgYjIAAREAAzMhcHJiYjIgIREBIBFAc1NjU0LgI1NDMyFgWiEVSOeGYrLzx9dHA6cHeFTv0oWKs9N6tdvNKlkzxfK0Z5muQBAf7g/v1oqkxLp27+/P7jAQHkmnlGK148lKXSAoDteB8kH1w4QwfHeSQrJDQzEBxnbiQsJPi6Qj85SAFOAS0BCwEoKx+SUv6I/q3+jP5iKDAtKwGdAXUBVQF2UpIfK/7Z/vT+0f60BmiiPUgpNRQSERocSUQAAAMAc//sBgQHBgAqAD8ATgBcQDMTBxwoKCwiQEUNSkI2BwpPUDI6Py0tNkxCCkAfEAoQRlkCF0ZZAgQlChAaFQQVRlkABBYAPzMrEQAzGD8zEjkrKxEAMxoYEN7c1DIRM80yERIBFzkRMxEzMTAFIicGIyICERASMzIWFwcmIyIGFRAhMjcWFjMgETQmIyIHJzY2MzISERACAxUjIi4CIyIVFSM1NDYzMh4CMwUUBzU2NTQuAjU0MzIWBCuUXlyP4frPuj53KDlZR3RtATF7cD5vQwEtbnNHWTkodz67zvdREFSPeGUra31zcDpxdoNO/vDudx4kHlw4QxRBQQEjAQ4BFwEoIBmLM9bW/l5QKiYBotbWM4sZIP7X/ur+9f7aBqV4JCokZhEfZG8lKyXdoT5IKDgUEREZG0pEAAACAF7/7Ad/BwQADQBAAF9ANDAkOTY+FxcBEjYpDCQHQUIOLSctSVkeNzchJwUJCQ1ACQ9IDQcD
C0AUJwQ7MyEzSVkaIRMAPzMrEQAzGD8zGt4yMs0rMhEzERI5LzkrEQAzERIBFzkRMxEzETMxMAEVByMnIwcjJyMHIyc1ASIGByc2MzISERAAISImJyMGBiMgABEQADMyFwcmJiMiAhEQEjMyNjcRMxEWMzISERACBYtQIDK6MSExvC8hUANDPF0tRnyZ5P/+4v79dKxMCU6scP78/uMBAeWWfkYtXTyTpdK+QYIzqmaRvNSlBwQbrGdnZ2esG/4rKR+SUP6I/q3+i/5jMDAxLwGgAXIBVQF2UJIfKf7X/vb+0f60JiYByf43TAFKATEBCwEoAAACAAAAAAYdBaQADQAqAD9AJCQBDhobDBIHKywoFQ4fFgMREgUJCQ1ACQ9IDQcDCyMbEg8RFQA/PzMz3jIyzSsyETMREhc5PxESARc5MTABFQcjJyMHIycjByMnNQEHAyMBMxMWFzM2NhMDMwAWFzM2EhEzEAIHIwMmBLZSHjK8MR8xvDIeUAGsJ6rV/n+s9icpCAwjuqyyAQktCgitmabD27Z9IQWkG6xnZ2dnrBv8JV/+lgRI/UlvqyNRAYgB1fz/kCy4AbMBUv6W/gflAVpcAAABAH3+FATjBcsAFwAtQBgDDwkKFQoPAxgZEwBJWRMEDAZJWQwTChsAPz8rABg/KxESARc5ETMRMzEwASIAERAAITI3ESMRIyAAETQSJDMyFwcmA0j1/uABCgECbzmqFP61/p+vAUjY7apHqwUz/sD+6P7a/tQX/XQB2AGEAW3gAVa4VJJOAAEAc/4UA6IEXAAYAC9AGA8DFxYJFgMDGRoXGwYMRlkGEAASRlkAFgA/KwAYPysAGD8REgEXOREzETMxMAUiABEQADMyFhcHJiMiBhUUFjMyNjcRIxECdf7+/AER+0+kMDGOaLGrq6s1UDmmFAEfARIBFAErIheNM83d3MgRGv1uAdgAAAEAav/8BHUFBgATAC9AIQQCCAMGABEHChANEgwODhUUEwADEQYPBRAHDQoJDAsBEgA/zRc5ERIBFzkxMAEDJxMlNwUTJTcFExcDBQclAwUHAgK2ebb+4UIBIc3+30MBIbl2uAEhRP7hzAEeQQE5/sNDAUKmc6gBZKZ1qAE9Q/7ApnOm/p6ocwABAMsEkQOsBbQAEwAeQAwABgoQBhAUFQMADQkALzMzMhESATk5ETMRMzEwAQYGIyImNTQ2MyE2NjMyFhUUBiMBhwYqMDMpKjYBwQYrLzMtLDYE8C0yMjU1KS4wMTM4KAABAPgE5QPbBdcAEwAcQAsHEhUUABISDASACQAvGswyMxEzERIBOTkxMAEyNzYzMhYVFSM1NCMiDgIjIzUBBHiWlVFvdH1qK2Z5jlQQBWI7Om9kHxFmJCskeQABAd8E1wLNBjUADgAYQAoKAAwFAAMPEAMNAC/MERIBFzkRMzEwATQ2MzIVFA4CFRQXFSYB30M4XB4kHnfuBbg4RUwbGRASFDYoSkAAAQHhBNcCzwY1AA4AGEAKBQAACgIDDxAMAgAvzBESARc5ETMxMAEUBzU2NTQuAjU0MzIWAs/udx4kHlw4QwW4oUBKKDYUEhAZG0xFAAgAKf7BB8EFkQAMABoAKAA2AEQAUgBfAG0AgEBJXyhEWiI+DBoHFFI2bUwwZxBubwAHOkhIQU9FRD5MVmNjXGpmX1ptHiwsJTMvIigDNhAXB09Mam0zNhcXNjNtakxPBwgJDRQDCQAvMy8zEhc5Ly8vLy8vLy8RMxEXMxEzMxEzETMzMxEzMxEzETMzMxEzMxEzETMREgEXOTEwASYmIyIGByM2MzIWFwMmJiMiBgcjNjYzMhYXASYmIyIGByM2NjMyFhchJiYjIgYHIzY2MzIWFwEmJiMiBgcjNjYzMhYXISYmIyIGByM2NjMyFhcBJiYjIgYHIzYzMhYXISYmIyIGByM2NjMyFhcEbwU8RU4yBUsLxV1xB08FPEVOMgVLBWRnXHMGAfQFPEROMgVMBWVnXHMG+y8FPEROMgVMBWVnXHMGBDEFPEROMgVMBWVnXHMG+y8FPEROMgVMBWVnXHMGBPAFPEROMwVLC8Zccwb5vgU8RE4yBUwFZWdccwYEzywsKS/CZV358iwsKS9ZaWZcARYtKycxWmlmXS0rJzFaaWZdA9stKycxWmlmXS0rJzFaaWZd/hksLCgwwmhaLSsnMVpoZlwAAAgAKf5/B30F0wAHAA8AFwAfACcALgA1AD4ANEAlFRclID46BQEpLB8cMjUJDRA/QDsrBy42GRUdES8nDyQzDgUMBQAvLxIXORESARc5MTAFFwYGByM2NwMnNjY3MwYHATcWFhcVJicFByYmJzUWFwE3NjY3FwYHAQcGByc2NwMnJic3FhcBFxYWFwcmJicENwsRRiRhNRE7CxNJH2E0EgIjDkfIQd2B+2gOQr9P3YEDpgJDvkNFsXj86gKbqUWxeCsRUkVDe0wDahEnWhZDH4ImIw5Cv0/dgQSYDkfIQdyC/hYLE0kfYTUROwsRRiRhNREBqhAnWBlEblj8lRBZP0RuWALeAoy3RsZj/OkCRcI8RjLDNAAAAgDJ/oMGCAdeABQAIgBZQC8NCgwHDg4JEwICFBQYIAkKBSQjFBIGBRESBRIOAA4JSVkOEgwiHw8YARgcFQcAAwA/Mt4yzV0yPz8rERIAOTkRMxEzGD8REgEXOREzETMRMxEzMxEzMTATMxEUBwczATMRMwMjEyMRNDcjASMBIiYnMxYWMzI2NzMGBsmhCgQIAzS4uI/FnKATCfzJugJDuqgKmwpdbmljCZ4MtQW2/NF2zlMExvri/esBfQMlr/f7NQYrj6RsTl1dn5QAAgCw/ocFEgYMABEAHwBPQCoKBwkECwsGDwEBEBAVHQYHBSEgAw4QEQ8LBkZZCxAVCSIcDxUBFRkSBA8AP94yzV0yPz8zKwAYPxI5ORESARc5ETMRMxEzETMzETMxMAERFAcBMxEzAyMTIxE0NwEjESUiJiczFhYzMjY3MwYGAUwKAlHPsIGsfZsI/a7NAey5qgqcB1p0Z2QKnQyyBEj9aoiIA6b8R/34AXkCoJ5o/FoESJGPpGZUWmCelQACAC8AAAR9BbYAEQAZAE1AKQgEEhIBDxULCwYPEQQaGwgZSVkHEQARSVkEAAgACAAPAg8SSlkPEgIDAD8/KxESADk5GC8vETMrEQAzKxESARc5ETMRMzMRMzMxMBMzNTMVIRUhETMgERQEISERIwEzIBE0JiMjL5qqAVb+qsACSv7s/vH+b5oBRN0Be7jJ1wT8urqW/uD+ZNLYBGb8KwEZhIAAAAIAFAAABEwGFAASABoAS0AoBAAUFBAMFwgIAgwOBBscBBNGWQMODw5HWQAPBA8EDwwRAAwURlkMFQA/KwAYPxI5OS8vETMrEQAzKxESARc5ETMRMzMRMzMxMAEhFSERITIWFRQGIyERIzUzNTMRESEgNTQmIwFWASf+2QFA39fg3f4hnJymATEBH4SfBR+B/eWam6SqBJ6B9fvg/pe5XFQAAAIAyQAABHkFtgAPABwASEApEAo
KCxgAAAQFAxYGFRMUCwodHhYTHBAMHEpZCRBKWQYDDAkJCwwDCxIAPz8SOS8SOTkrKxESADk5ERIBFzkRMxEzETMxMAEUBgcXBycGIyMRIxEhIAQBMzI3JzcXNjU0JiMjBHlzbHhklWaIuKoBiQESARX8+qZXTGxsjH/CysgEDH/JOZ1UwBv9wQW21/3yCo1SsEiykY4AAgCw/hQEdQRcABgAKQBVQDEdCwQHBwgnEhIVFhQlFyIkIwgKKislIhkgDxlGWQwLCwQUFwQADxAJDwgbACBGWQAWAD8rABg/Pz8SFzkRMysREgA5ORESARc5ETMRMxEzMzMxMAUiJicjFhURIxEzFzM2NjMyEhEQBxcHJwYDIgYHFRQWMzI3JzcXNjU0JgKua7E8DAymhxkIQKlt2u23c2SDR22olgKaqi8peWqBZZYUT1KUIv49BjSWWlD+1v7z/q6RnFCuGAPjussl58cMnlCqZ/nX0QAAAQAvAAAECAW2AA0APEAfAwcHDAgABQgKBA4PBgoLCklZAwsLCA0NAklZDQMIEgA/PysREgA5GC8zKxEAMxESARc5ETMzETMxMAEVIREhFSERIxEjNTMRBAj9awGo/liqmpoFtpn+Apb9dwKJlgKXAAEAEgAAA0IESAANADxAHwIGBgsHAAQHCQQODwUJCglHWQIKCgcMDAFGWQwPBxUAPz8rERIAORgvMysRADMREgEXOREzMxEzMTABIREhFSERIxEjNTMRIQNC/hQBWv6mpp6eApIDvP6of/4bAeV/AeQAAAEAyf4ABNsFtgAbAEFAIwkDAwQZDg4HFAQEHB0RF0lZERwLAElZCwsEBQUISVkFAwQSAD8/KxESADkYLysAGD8rERIBFzkRMxEzETMxMAEiBxEjESEVIRE2MyAAERAAISImJzUWMyARNAACMWRaqgNJ/WFaeQFAAVX+4v79U31Ge4kBf/8AAo8M/X0Ftpn9/Ar+rf7G/sX+pRUcmDEB/vUBBAAAAQCw/goD+gRIABsAQUAjCBkUDg4PDwISGQQdHBYLRlkWFg8QEBNGWRAPDxUABUZZABsAPysAGD8/KxESADkYLysREgEXOREzETMRMzEwASInNRYzMjY1NCYjIgcRIxEhFSERNjMgABEQAgJGkWV0e4WIsrVFSqYCmv4MUjsBEAEH5P4KPJU/ytff0BH+JQRIjv63DP7l/tn+9f7aAAABAAL+gwb4BbYAFQBNQCkGEREDEg0MDAgJEgABFQcWFxIVEhMQCQYDAAAPAQ8KSVkPEg0iBwQBAwA/MzM/PysREgA5ETMzMzMzGD8zERIBFzkRMxEzMxEzMTABATMBETMRATMBATMRIxEjAREjEQEjAlb9wb4COaQCOr79wAHatKJe/bqk/bvHAvACxv08AsT9PALE/Tz9qP3pAX0C5f0bAuX9GwAAAQAE/ocGHwRIABUAS0AoAg0NFQ4JCAgEBQ4SExEHFhcVDwwFAhISCwMAEw8OERULBkZZCxUJIgA/PysAGD8zPzMzEjkRMzMzMzMREgEXOREzETMzETMxMAEzEQEzAQEzESMRIwERIxEBIwEBMwECpJkBxbb+NgFwwaJe/h6Z/h+/AfD+N7YBwwRI/e0CE/3t/lr9+AF5Ai390wIt/dMCNQIT/e0A//8ASv5CBDUFywImAbEAAAAHA38BWAAA//8ARP5CA38EXAImAdEAAAAHA38BCAAAAAEAyf6DBSsFtgAPADtAIAwICAkDAgIODwYJBRARDwwGAwUNCgMJEgUASVkFEgMiAD8/KwAYPz8zEhc5ERIBFzkRMxEzETMxMCUzESMRIwEHESMRMxEBMwEEf6yiZv3pmaqqApfJ/bSa/ekBfQLFiP3DBbb9KwLV/YUAAQCw/oUEPQRIAA4AOkAfDgoKCwYFBQECCwQPEAIOCQMIAAwPCxUIA0ZZCBUGIgA/PysAGD8/MxIXORESARc5ETMRMxEzMTABMwEBMxEjESMBESMRMxEDL7b+JwF/sp9U/gympgRI/e/+WP32AXsCK/3VBEj96wAAAQDJAAAE6QW2ABIAOEAeBgICAwoREQcSDgwSAwQTFAgKBgAQEgYDCwQDDwMSAD8zPzMSFzkREgEXOREzMxEzETMRMzEwAQcRIxEzETcRMxUBMwEBIwERIwHwfaqqfX0Bm8v9tAJiyP5MfQKoa/3DBbb9JYsBXdMBxv2F/MUCXP7PAAEAsAAABDsESAATADpAHwYCAgMOChISBxMPDBMDBBQVCAoGARETBgMLBA8QAxUAPzM/MxIXORESARc5ETMzETMzETMRMzEwAScRIxEzETcRMxUBMwEVASMBFSMBzXempneDAQ62/jwB68L+1YEBsnn91QRI/et5AUrNAR/+JWv9/gE73QAAAQAvAAAE6QW2ABMAR0AmCAQQEAERCw4MCgYOERMGFBUHEwATSVkECwgOAxEAAAINERIJAgMAPzM/MxI5LxIXOTMrEQAzERIBFzkRMxEzMxEzMzEwEzM1MxUzFSMRATMBASMBBxEjESMvmqrd3QKVy/20AmLO/fGZqpoFBLKyl/5uAtv9hfzFAsWG/cEEbQAAAQAUAAAEGwYUABkATUArCggEFhYBFxIQBhEXGQYaGxQKDxMXFQcZABlHWQQPAB8ALwADAAACDw8CAAA/PxI5L10zKxEAMxg/MxI5ORESARc5ETMzETMzMzEwEzM1MxUhFSERBwczNzY2ATMBASMBBxEjESMUnKQBff6DAwMIEjcoAXDH/kQB2cf+fX2knAVaurp//ehbNxhKMAGF/i39iwIEav5mBNsAAQAQAAAFgwW2AA0ANUAbAgoKCwUIBgQICwQODwgCAAcLEgMDAA1JWQADAD8rABg/PzMSOTkREgEXOREzETMRMzEwEyERATMBASMBBxEjESEQAfwClsv9tAJiyf3smqr+rgW2/SUC2/2F/MUCxYj9wwUdAAABACkAAATjBEgADAA1QBsFAQEJCQoMCgQGBA4NCAIABwoVAw8ADEZZAA8APysAGD8/MxI5ORESARc5ETMRMxEzMTATIREBMwEBIwERIxEhKQICAdu2/icCAML+CqT+ogRI/esCFf3t/csCK/3VA7wAAQDJ/oMFwQW2AA8AREAkDAgICQ0FBQADAgIACQMQEQwHSVkMDAUOCgMJEgUASVkFEgMiAD8/KwAYPz8zEjkvKxESARc5ETMRMxEzETMRMzEwJTMRIxEjESERIxEzESERMwUfoqKq/P6qqgMCqpr96QF9ArD9UAW2/ZICbgAAAQCw/ocE+ARIAA8ATkArAQ0NDgIKCgUIBwcFDgMQEQEMRlkPAR8BAgsDAQEKAw8PDhUKBUZZChUIIgA/PysAGD8/MxI5L19eXSsREgEXOREzETMRMxEzETMxMAERIREzETMRIxEjESERIxEBVgJmppamlv2apgRI/jUBy/xH/fgBeQHu/hIESAAAAQDJAAAGbwW2AA0AP0AhCgYGBwsDAwIAAgcDDg8KBUlZCgoHDAwBSVkMAwgDAwcSAD8zPz8rERIAORgvKx
ESARc5ETMRMxEzETMxMAEhESMRIREjETMRIREhBm/+sKz9AKqqAwAB/AUd+uMCsP1QBbb9kgJuAAEAsAAABcEESAANAElAJwELCwwCCAgHBAcMAw4PDQ8BCkZZDwEfAQILAwEBAwgMFQMGRlkDDwA/KwAYPzMSOS9fXl0rABg/ERIBFzkRMxEzETMRMzEwAREhESEVIREjESERIxEBVgJmAgX+oab9mqYESP41AcuM/EQB7v4SBEgAAQDJ/gAIHQW2AB0AR0AmBAUIAAABFw0NEgEFBB4fEBVJWRAcChpJWQoKBQYGA0lZBgMBBRIAPzM/KxESADkYLysAGD8rERIBFzkRMxEzETMRMzEwISMRIREjESERNjMgABEQACEiJzUWMyARNAIjIgYHBNmq/USqBBBEfQEyAVH+5f7+nHuGfwF65ugqfxgFHfrjBbb9YQz+qP7I/sf+pjGYMQH+8gEFBwUAAAEAsP4KBqgESAAcAEdAJhESFQ0NDgcaGgIOEgQdHhcKRlkXFxITExBGWRMPDhIVAAVGWQAbAD8rABg/Mz8rERIAORgvKxESARc5ETMRMxEzETMxMAEiJzUWMzIRNCYjIgcRIxEhESMRIRE2MzIAERACBReDYW1s8KasQ0io/d+mA29LQvYBBtH+CjyVPwGh39AV/ikDuPxIBEj+Jw7+1/7n/vT+2wACAH3/rAXhBc0AKAA0AFBALBsRLyMpAAgAAxYgIxEHNTYmLEpZDDImJg4UFBlJWRQECgVJWQoODh5JWQ4TAD8rABgQxCsAGD8rERIAORgvOTkrERIBFzkRMxEzETMxMAEUAgcWMzI3FQYjIicGIyAAERAAITIXByYjIBEQEjMyNyYCNTQSMzISAzQmIyIGFRQWFzY2BbiKdEJaTj04W7KUZpD+yv6hAUkBOn9cL1Ra/jP/6zYuVlzGr7XBsGddXmddU2ZzAqa1/stWHhaZGWQkAYkBVgF4AYojkRz9nv7g/s4KZwEcoPQBCv72/v6xzMmwjP5VQ/8AAAIAc//HBNMEXAAKADUAUEAsHhMAJgYsNCwvGCQmEwc2NykIR1kNAykpDxYWG0ZZFhALMUZZCw8PIUZZDxYAPysAGBDEKwAYPysREgA5GC85OSsREgEXOREzETMRMzEwARQWFzY2NTQjIgYBIicGIyImJjUQEjMyFwcmIyIGFRQWMzI2NyY1NDYzMhYVFAYHFjMyNxUGAu5EP0RTh0hLAWaTgmB7leJ6+ONbTSU2T5yRqqQlNQaLqJeUnWteNENCMScB8l6hNSyebut9/WNNKIv+pAETATAWihPR587SCQOU4a3BvbF90UAaDokOAP//AH3+QgTPBcsCJgAmAAAABwN/AiUAAP//AHP+QgOLBFwCJgBGAAAABwN/AYMAAAABABD+gwRaBbYACwAyQBsGCwgJAwkLAQQMDQsGSVkLEgkiBQECAUlZAgMAPysRADMYPz8rERIBFzkRMxEzMTABITUhFSERMxEjESMB3/4xBEr+MaKirAUdmZn7ff3pAX0AAAEAKf6HA5EESAALADRAGwYLCAkDCQsBBAwNCSIFAQIBRlkCDwsGRlkLFQA/KwAYPysRADMYPxESARc5ETMRMzEwASE1IRUhETMRIxEjAYn+oANo/p6WppYDvIyM/NP9+AF5AP//AAAAAAR7BbYCBgA8AAAAAQAA/hQEAgRIAA0AKUAUAAEMAQMDDg8IBw0HAgsDDwIVARsAPz8/MxI5OREzERIBFzkRMzEwASMRATMTFhczNjcTMwECVKb+UqzsUxMIIUbprP5S/hQB6ARM/ZveYYq1AmX7tAAAAQAAAAAEewW2ABAAOkAeBAgIDQkCBgkLDwUREgcLDAtJWQQADwwMCQEPAwkSAD8/MxI5LxI5MysRADMREgEXOREzMxEzMTABATMBFSEVIREjESE1ITUBMwI9AYa4/hgBK/7VrP7TAS3+GboC2wLb/IE7mP6cAWSYMwOHAAEAAP4UBAIESAATADxAHxEBAQYCEBMCBAcFFBUMCwsFDwcPAAQFBEdZEQUVAhsAPz8zKxEAMxg/MxI5ETMREgEXOREzMxEzMTAFESMRITUhATMTFhczNjcTMwEhFQJUpv7qART+VKzsUxMIIUbprP5UARKB/pUBa4EESP2b3mGKtQJl+7iBAAABAAj+gwTVBbYADwA3QCADAgIODwwGCQoICBARDA8JBgQFDQoDCBIFAElZBRIDIgA/PysAGD8/MxIXORESARc5ETMxMCUzESMRIwEBIwEBMwEBMwEEM6KiXv53/nC0Aeb+O7wBawFutf47mv3pAX0Cg/19AvwCuv29AkP9TAABACf+hQQ3BEgADwA5QCEKCQkFBgMNAAEPCBARDxUDBgANBAwBDAdGWQwVCiIEAQ8APzM/PysREgAXORg/ERIBFzkRMzEwAQEzAQEzAQEzESMRIwEBIwG4/oO9ASEBILv+gwErlaZF/s3+yrwCMQIX/lwBpP3p/l799gF7Abz+RAAAAQAQ/oMGqAW2AA8AQEAiDAUADQMCAg0KBQcFEBEOAwsHCAdJWQgDAAwFDElZBRIDIgA/PysRADMYPysRADMYPxESARc5ETMRMxEzMTAlMxEjESERITUhFSERIREzBf6qovu0/lYEL/4lAvCqmv3pAX0FHZmZ+30FHAABACn+hwWYBEYADwA/QCICCwYDCQgIAwALDQUQEQENDg1GWQ4PBgILAkZZCxUJIgQPAD8/PysRADMYPysRADMREgEXOREzETMRMzEwASERIREzETMRIxEhESE1IQN5/pcCRqacpvx4/r8DUAO6/NUDt/xJ/fgBeQO6jAAAAQCq/oMFaAW2ABcAO0AfFQAFAwIPDAIFDAMYGRIJSVkSEgUWDQMFAElZBRIDIgA/PysAGD8zEjkvKxESARc5ETMRMxEzMzEwJTMRIxEjEQYGIyImNREzERQWMzI2NxEzBMehoaqVxmrP36p/j2Gxqaqa/ekBfQJcNSe+swJF/c95dB03AsoAAAEAnP6FBMMESAAWADtAHwEVCQYODAsLDhUDFxgDEkZZAwMOBxYPDglGWQ4VDCIAPz8rABg/MxI5LysREgEXOREzETMzETMxMAERFDMyNjcRMxEzESMRIxEGBiMiJjURAULbW6ZpppamlmmzcaS6BEj+cMA4QwHV/Ef99gF7AfBIO6yTAZwAAQCqAAAExwW2ABYASkAmBQILFRUIFg0RERAQFgIDFxgUAAgASVkLCBYICQkIFgMDERIOAwMAPzM/Ehc5Ly8vETMrEQAzERIBFzkRMxEzETMzETMRMzEwASARETMRFBYzETMRNjcRMxEjEQYHESMCdf41qoeafYajrKyogX0CAAFxAkX9z3d2AVz+qg08As/6SgJYQRH+zwABAJwAAAQdBEgAFwBKQCYBFgYQEAMRCAwMCwsRFgMYGQ8TAxNGWQYDEQMEBAMRAwwJFw8MFQA/PzMSFzkvLy8RMysRADMREgEXOREzETMRMzMRMxEzMTABERQXETMRNjcRMxEjEQYHFSM1IyImN
REBQsh3cYWmpoB2dxaguARI/nC6BgEt/t0YWQHV+7gB8Fsa+OqqlQGcAAEAyQAABOUFtgASAC9AFwIRERIJCAgSFBMEDUlZAhIEBAkSEgADAD8/MzkvEjkrERIBOTkRMxEzETMxMBMzESQzMhYVESMRNCYjIgYHESPJqgEAxM/fqn+Pa7qVqgW2/aRcv7H9ugIxeHYiMv01AAABALAAAARCBEgAEgAvQBcAEgsHBwgSCBQTDgNGWQsODggJDwAIFQA/Mz8SOS85KxESATk5ETMRMxEzMTAhETQjIgYHESMRMxE2NjMyFhURA5rZWJx3pqZfunKjvgGNwTFK/i0ESP4ORT6ol/5mAAIAPf/sBj8FzQAgACcAUUAqBQMAJBERCB4lEBAYHgAEKCkRHgceSVkkBwIHAhsMGxRJWRsTDCFJWQwEAD8rABg/KxESADk5GC8vMysRADMREgEXOREzETMzETMRMzMxMBM0NzMGFRQzMzcSACEgABEVIRIAMzI2NxUGBiMgAAMiJgEiAgchECY9G5EUcSIFHQFNARcBKQEo+9wOAQX3ZcqNct2C/sb+oxOOmwOv0fAQA27LA4dJNjI8ZysBKgFH/oX+j0X++P7vHyucJx4BZAFMdgIj/vX5AQn7AAACADP/7ATdBFoAHwAmAExAKAoIBRYNJBUVHQ0DBQUnKBYDDANGWSMMBwwHABERIEZZERAAGUZZABYAPysAGD8rERIAOTkYLy8zKxEAMxESARc5ETMRMxEzMzEwBSIAJyQ1NDczBhUUMzM3NjYzMhIVFSEWFjMyNjcVBgYDIgYHITQmA0rz/uwG/vYZjRRqFQYi+rfP8f0MBqytZZ9iWJ2ghpcOAj2MFAEe/ATdRTIvO2cjyuD+9+JpxsMgKpQmIQPjpJ6dpQACAD3+gwY/Bc0AIgApAF1AMQsJBiYXFw4DISInFhYeIgMGBSorIiIgExcDDQNJWSYNCA0IABISI0lZEgQAGkpZABMAPysAGD8rERIAOTkYLy8zKxEAMxg/PxESARc5ETMRMxEzMxEzETMzMTAFJAADIiY1NDczBhUUMzM3EgAhIAARFSESADMyNjcVBgcRIxMiAgchECYDoP7+/tsTjpsbkRRxIgUdAU0BFwEpASj73A4BBfdlyo2w66ZM0fAQA27LDB0BWgExdnVJNjI8ZysBKgFH/oX+j0X++P7vHyucPgX+lQay/vX5AQn7AAIAM/6HBN0EWgAhACgAWEAvCggFFg0gISYVFR0hDQMFBikqISIfFhYDDANGWSUMBwwHABERIkZZERAAGUZZABUAPysAGD8rERIAOTkYLy8zKxEAMxg/PxESARc5ETMRMxEzETMzMTAFJgInJDU0NzMGFRQzMzc2NjMyEhUVIRYWMzI2NxUGBxEjEyIGByE0JgLVv9MG/vYZjRRqFQYi+rfP8f0MBqytZZ9ijqWmRIaXDgI9jAofARHgBN1FMi87ZyPK4P734mnGwyAqlEEE/pkFSKSenaUA//8AVAAAAlYFtgIGACwAAP//AAIAAAa8B2ACJgGwAAABBwI2ARABVAAIswESBSYAKzX//wAEAAAF3wYMAiYB0AAAAQcCNgCkAAAACLMBEhEmACs1AAEAyf4ABRkFtgAcAEJAJQcDAwQaDg4JChQEBR0eERdJWREcBwJJWQsASlkHCwsECAUDBBIAPz8zEjkvOSsrABg/KxESARc5ETMRMxEzMTABIgcRIxEzEQEzATcgABEQACEiJic1FjMyEjU0JAJejF+qqgKJzf2FGgFPAWL+2f71UnxGepi7yP7rAnsf/aQFtv08AsT9VAL+u/7P/sb+pBQdmDEBDfHo/QAAAQCw/goEIQRIABwAQkAlBAAAARcKEAoGBwEFHR4OFEZZDhsEHEdZBxpGWQQHBwEFAg8BFQA/PzMSOS85KysAGD8rERIBFzkRMxEzETMxMCEjETMRATMBBBIRFAYGIyInNRYWMzI2NTQmIyIHAVSkpAHjt/43AQD8bsyFiF8ubEeHmLu+UlwESP36Agb+HgT+5P71sfyEPJEZJtnI088YAAEAAP6DBZEFtgAXADlAHwMABQQBAQUOAxgZFgdJWRYDDBFKWQwSBQBJWQUSAyIAPz8rABg/KwAYPysREgEXOREzETMzMTAlMwMjEyMRIQcCAgYnIic1FjMyNjYSEyEE2biPxZyq/iUfPV2Yfko7Njs1Tz1dOAMSmv3pAX0FH/D+If5FrgIZjxpX1wJZAbgAAAEAEP6HBI8ERgAUADlAHwMABQQBAQUNAxUWEwdGWRMPCxBHWQsVBQBGWQUVAyIAPz8rABg/KwAYPysREgEXOREzETMzMTAlMwMjEyMRIQICBiMiJzUWMzISEyED37CBrH2m/rUcXph2OhwWHHGJIgKBj/34AXkDuP6Y/mTACn8GAdkB9gAAAQDJ/gAFHwW2ABUAPUAgEg4ODxMLCwAABg8DFhcSDUlZEhIPFBADDxIDCUlZAxwAPysAGD8/MxI5LysREgEXOREzETMRMxEzMTAlEAAhIiYnNRYzIBERIREjETMRIREzBR/+5v77UnpNe4cBjPz+qqoDAqqW/sL+qBMeljEB9wIj/VAFtv2SAm4AAQCw/goEYgRIABUAR0AnDwsLDBAICBMTAgwDFhcPCkZZDw8fDwILAw8PDBENDwwVAAVGWQAbAD8rABg/PzMSOS9fXl0rERIBFzkRMxEzETMRMzEwASInNRYzMjY1ESERIxEzESERMxEQAgLThF1vZn12/ZympgJkqM/+CjqVPcbPAb3+EgRI/jUBy/vr/vT+4wABAMn+gwXXBbYADwBEQCQMCAgJDQMABQQBAQUJAxARDAdJWQwMBQ4KAwkSBQBJWQUSAyIAPz8rABg/PzMSOS8rERIBFzkRMxEzMzMRMxEzMTAlMwMjEyMRIREjETMRIREzBR+4kcWeqvz+qqoDAqqa/ekBfQKw/VAFtv2SAm4AAAEAsP6HBRIERgAPAERAJAENDQ4IBQIKCQYGCg4DEBEBDEZZAQEKAw8PDhUKBUZZChUIIgA/PysAGD8/MxI5LysREgEXOREzETMzMxEzETMxMAERIREzETMDIxMjESERIxEBVgJmprCBrH2m/ZqmBEb+NwHJ/En9+AF5Ae7+EgRGAAABAKr+gwTHBbYAFwA9QCAPDAIDFQUFAAADDAMYGRIJSVkSEgEWDQMDIgEESVkBEgA/KwAYPz8zEjkvKxESARc5ETMRMxEzETMxMCEjESMRMxEGBiMiJjURMxEUFjMyNjcRMwTHqqKilcZqz9+qf49hsamq/oMCFwHCNSe+swJF/c95dB03AsoAAQCc/oUELQRIABYAPUAgARULDAYODgkJDBUDFxgDEkZZAwMKBxYPDCIKDUZZChUAPysAGD8/MxI5LysREgEXOREzETMRMxEzMTABERQzMjY3ETMRIxEjETMRBgYjIiY1EQFC21umaaaVppVps3GkugRI/nDAOEMB1fu4/oUCCgFhSDuskwGcAAEAyf6DBykFtgAYAEhAJQkGBgcRDgwTEg8P
EwcDGRoXFgILAhMIEw5JWRMSESIMCAMABxIAPzM/Mz8/KxESADk5ETMzERIBFzkRMxEzMzMRMxEzMTAhASMXFhURIxEhATMBMxEzAyMTIxE0NyMBA1D+EAgHB50BAAHRCAHR/riPx56qDgj+DAUQf8Av/F4FtvtKBLb65P3pAX0DroTc+vIAAAEAsP6HBd8ERgAYAD9AIBMUCAUKCQYGChQDGRoLEgASDwMVDxQVCgVGWQoPFQgiAD8/MysAGD8/MxI5OREzERIBFzkRMxEzMxEzMTAlNzcBMxEzAyMTIxEHBwEjASYnESMRMwEWAukfKwEp07CBrH2TFDr+5Yv+5TUUlMsBKS2gXXYC0/xJ/fgBeQOJOpn9SgK4hkv8dwRG/S1u//8AVAAAAlYFtgIGACwAAP//AAAAAAUQB14CJgAkAAABBwI2ADkBUgAIswIPBSYAKzX//wBe/+wDzQYMAiYARAAAAQYCNugAAAizAiURJgArNf//AAAAAAUQByUCJgAkAAABBwBqAD0BUgAKtAMCJAUmACs1Nf//AF7/7APNBdMCJgBEAAABBgBq8wAACrQDAjoRJgArNTX////+AAAGgQW2AgYAiAAA//8AXv/sBnMEXAIGAKgAAP//AMkAAAP4B14CJgAoAAABBwI2ABABUgAIswEMBSYAKzX//wBz/+wEEgYMAiYASAAAAQYCNgwAAAizAhsRJgArNQACAHX/7AVYBc0AEgAZAD1AIBcOEBYWCQkCDgMaGw8XSVkPDwwGDBNJWQwTBgBJWQYEAD8rABg/KxESADkYLysREgEXOREzETMRMzEwASIHNTY2MyAAERAAISARNSECAAMyEjchEBYCmOPic9KGAUsBb/6m/sv9rAQvEf75w9L5EPyHzAU1TJ4mIP5x/pv+ov5xAutGAQoBDvtOAQ33/vj8AAACAGb/7AQGBFwAFAAbADtAHxkJGAsDAxEJAxwdChlGWQoKBgAGFUZZBhYADkZZABAAPysAGD8rERIAORgvKxESARc5ETMzETMxMAEyABEQACMiAjU1ISYmIyIGBzU2NhMyNjchFBYB+vUBF/792tDzAvQFs6ZipV9ZopqFmgz9w40EXP7U/vv++P7JAQzhacy7ISmTKCL8G6WcnaQA//8Adf/sBVgHJQImAuEAAAEHAGoAkwFSAAq0AwIvBSYAKzU1//8AZv/sBAYF0wImAuIAAAEGAGrqAAAKtAMCMREmACs1Nf//AAIAAAa8ByUCJgGwAAABBwBqARABUgAKtAIBJwUmACs1Nf//AAQAAAXfBdMCJgHQAAABBwBqAKIAAAAKtAIBJxEmACs1Nf//AEr/7AQ1ByUCJgGxAAABBwBq//MBUgAKtAIBPgUmACs1Nf//AET/7AN/BdMCJgHRAAABBgBqlAAACrQCATgRJgArNTUAAQBK/+wENwW2ABkAQEAjABMVGQ8DAxkTFggFGhsZFhcWSVkAEkpZAAAGFwMGDEpZBhMAPysAGD8SOS8rKxEAMxESARc5ETMRMxEzMTABBAQVFAQhICc1FhYzMjY1NCYjIzUBITUhFQH8ARcBJP7N/ur+/6Ng3mrHyuHfjAHu/U4DhwM/CdPBzuhPni4ymZCGio0B3pmLAAABABv+FAOmBEgAGQBAQCMAExUZDwQEGRMWCQUaGxkWFxZGWQASR1kAAAcXDwcMRlkHGwA/KwAYPxI5LysrEQAzERIBFzkRMxEzETMxMAEeAhUUACMiJzUWMzI2NTQmIyM1ASE1IRUBrJXmf/7Y7+qKt8ihxdbKeQHF/YkDOAHPB3LKiN7+7kaaVr6gpKpyAf6OewD//wDLAAAFUga0AiYBsgAAAQcBTQC0AVIACLMBEwUmACs1//8AsAAABGIFYgImAdIAAAEGAU0xAAAIswERESYAKzX//wDLAAAFUgclAiYBsgAAAQcAagC+AVIACrQCASUFJgArNTX//wCwAAAEYgXTAiYB0gAAAQYAaj0AAAq0AgEjESYAKzU1//8Aff/sBb4HJQImADIAAAEHAGoA0QFSAAq0AwItBSYAKzU1//8Ac//sBGIF0wImAFIAAAEGAGodAAAKtAMCLhEmACs1Nf//AH3/7AW+Bc0CBgJ+AAD//wBz/+wEYgRcAgYCfwAA//8Aff/sBb4HJQImAn4AAAEHAGoA0QFSAAq0BAMvBSYAKzU1//8Ac//sBGIF0wImAn8AAAEGAGobAAAKtAQDMBEmACs1Nf//AD3/7ASJByUCJgHHAAABBwBq/+0BUgAKtAIBMAUmACs1Nf//ADn/7AN9BdMCJgHnAAABBgBqjgAACrQCATARJgArNTX//wAb/+wE+Aa0AiYBvQAAAQcBTQAvAVIACLMBGgUmACs1//8AAv4UBAYFYgImAFwAAAEGAU2tAAAIswEZESYAKzX//wAb/+wE+AclAiYBvQAAAQcAagA7AVIACrQCASwFJgArNTX//wAC/hQEBgXTAiYAXAAAAQYAarcAAAq0AgErESYAKzU1//8AG//sBPgHcwImAb0AAAEHAVMAjQFSAAq0AgEqBSYAKzU1//8AAv4UBAYGIQImAFwAAAEGAVMEAAAKtAIBKREmACs1Nf//AKoAAATHByUCJgHBAAABBwBqAGoBUgAKtAIBKQUmACs1Nf//AJwAAAQtBdMCJgHhAAABBgBqFwAACrQCASgRJgArNTUAAQDJ/oMECAW2AAkALUAYBAkGBwEHCQMKCwkESVkJEgciAANJWQADAD8rABg/PysREgEXOREzETMxMBMhFSERMxEjESPJAz/9a6GhqgW2mft9/ekBfQABALD+hwNCBEYACQAtQBgECQYHAQcJAwoLCQRGWQkVByIAA0ZZAA8APysAGD8/KxESARc5ETMRMzEwEyEVIREzESMRI7ACkv4UlqaWBEaM/NX9+AF5//8AyQAABgoHJQImAcUAAAEHAGoBGwFSAAq0BAMtBSYAKzU1//8AsAAABXkF0wImAeUAAAEHAGoAxQAAAAq0BAMsESYAKzU1//8AL/51BAgFtgImApsAAAAHA4AAkwAA//8AEv51A0IESAImApwAAAAGA4F1AP//AAj+dQTJBbYAJgA7AAAABwOAA1gAAP//ACf+dQQ0BEgAJgBbAAAABwOBAsMAAAABAAYAAASWBbYAEQA7QCIPAhEBEA0ECgcJBgsMExIKEQARSVkHDQ8EAAACDA8SBQIDAD8zPzMSOS85EjkzKxEAMxESARc5MTATIQEzAQEzASEVIQEjAQEjASF/ATP+d7wBawFst/5wATz+ugG9wf53/nC2Ab/+ugNUAmL9uwJF/Z6Y/UQCg/19ArwAAAEAJwAABAgESAARADtAIg8CEQEQDQQKBwkGCwwTEgoRABFHWQcNDwQAAAIMDxUFAg8APzM/MxI5LzkSOTMrEQAzERIBFzkxMBMhATMBATMBIRUhASMBASMBIXUBEv60vQEhASC7/rIBGP7iAWi8/s3+yrwBZv7oAncB0f5cAaT+L4H+CgG8/kQB9gAAAgCDAAAENwW2AAoAEwA0QBoEExM
HDwAHABUUAwxJWQMDCAUIEkpZCBIFAwA/PysREgA5GC8rERIBOTkRMxEzETMxMBM0JCEzETMRISAkASMiBhUUFjMzgwEkASDGqv5j/vX+9AMKut7CtsvZAaTUzgJw+krVAdt8jo+E//8Ac//sBDcGFAIGAEcAAAACAIP/7AZ3BbYAGQAjAEZAJB4DGAoKByMPEhIjAwMkJQYbSVkYBhAGEAAIAwwgACBKWRUAEwA/MisRADMYPxI5OS8vOSsREgEXOREzETMzEjkRMzEwBSImNTQkITMRMxEUMzI2NREzERQGIyImJwYTIyIGFRAhMjY1Ak7i6QEqASKRquZkearPuHafM3Epl9TCASF/jRLR0NneAnD7t+x7bgHm/hiuzlJaqgLAi5b+9HdwAAACAHP/7AaHBhQAIgAuAFFAKSwTDCAgHRomAwYGJhMDLzAeAA0QGhYEBBAWFipGWRYQACMQI0ZZCRAWAD8zKxEAMxg/KxESADkYLxI5Ejk/ERIBFzkRMxEzMzMSOREzMTAlMjY1ETMRFAYjIiYnIwYGIyICERASMzIWFzMmJjURMxEUFiEyNjU1NCYjIBEUFgT+dmuoyL2BnisIS7mB0Ojnz2qfPwwCCKZt/bmikpSi/uKLd4SIATn+vcjFW3FxWwEpAQwBDAEvTVURcBsBvvuMoIm5ziPnyf5O1tIAAQBO/+wGgQXLACoAS0AoBhMoGR8iIhYZEwENBissFwIBAgFKWQIgAiAlECUcSVklExAJSlkQBAA/KwAYPysREgA5ORgvLysREgA5ERIBFzkRMxEzETMxMAEjNTMyNjU0JiMiBgcnNjYzMhYVFAYHFQQTFhYzMjY1ETMRFAYjIiYnJiYBrsnBwNWagGexZ1Rd9oLW9bKcAWIGAmx8d3Co0r3K0AICzQKsj5OEbH83RXJIUMSnjbcaCDP+0ZZ/eYcBzf4pxsfRyJaRAAEAUP/sBcUEXAAlAEtAKBIeCiQCBQUkHiAOGAYmJyEPDg8ORlkPAw8DCBsbFEZZGxAIAEZZCBYAPysAGD8rERIAOTkYLy8rERIAORESARc5ETMRMxEzMTAlMhERMxEUBiMgAyYmIyM1MyA1NCMiBgcnNjYzMhYVFAcVFhYXFgRC3aa7xP6GEAWNlIxvASHyS4dNOVWjaLjTwGN7BQl3AQwBOf69ysMBTWNYjayiJCKHKCSbhrg5CBR6atMAAQBO/oME0QXLACMASkAoGRoeIyEgIBYaIwQQBiQlGgUEBQRKWQUFIxMjHklZIxIhIhMMSlkTBAA/KwAYPz8rERIAORgvKxESADkREgEXOREzETMRMzEwATQmIyM1MzI2NTQmIyIGByc2NjMyFhUUBgcVFhYVETMRIxEjA4Pl4tnRzeGkh2nDaVRh/oTc/b2juMOsoqwBnIWLj5OEa4A6QnJKTsSnjLcZCBmzlP7+/ekBfQAAAQBQ/ocEEARaAB4ASkAoBxIZHhwbGxUeEgMNBiAfFQQDBANGWQQEHg8eGUZZHhUcIg8KRlkPEAA/KwAYPz8rERIAORgvKxESADkREgEXOREzETMRMzEwATQhIzUzIDU0JiMiByc2MzIWFRQHFRYWFRUzESMRIwLV/suWdQE5hXeZlj2hy7/Vy35wnaaVAS3HjaxSUEaHSpqHtjkLJYlmnP34AXkAAAEAAP/pByEFtgAjADpAHRQjGh0dIwkDJCUbGwcSEgFJWRIDFwwHDEpZIAcTAD8zKxEAMxg/KxESADkYLxESARc5ETMRMzEwASEHAgIGBiMiJzUWMzI2NhISEyERFBYzMjY1ETMRFAYjIiY1BAz+SB8rTFOCZEVAMj8xQCw4SjcC729zcHGozbzEyAUf8P6u/kTSZhmPGj5oAQIB6QGu+8+JeXmHAc3+KcHMzMUAAAEAEP/sBikERgAdADpAHQAOBQgIDhYDHx4GBhQcHBBGWRwPAxkUGUdZCxQWAD8zKxEAMxg/KxESADkYLxESARc5ETMRMzEwARQWMzIRETMRFAYjIiY1ESECAgYjIic1FjMyEhMhA89od9Wmu768y/7FHF6YdjocFhxxiSICcQGDiYMBCgE7/r3Kw8TLAj3+mP5kwAp/BgHZAfYAAAEAyf/sB14FtgAZAENAIxcADwYJFhISEwkPEwMaGxYRSVkWBxYHExgUAxMSDANJWQwTAD8rABg/PzMSOTkvLysREgEXOREzETMRMxEzMzEwARQWMzI2NREzERQGIyImNREhESMRMxEhETME9m5zcHGmyL/DyP0nqqoC2aoBhYl5eYcBzf4pv87LxgEz/VAFtv2SAm4AAAEAsP/sBqgESAAYAE1AKgUCEwoNARYWFw0TFwMZGgEVRlkPAR8BAgsDAQsBCxcDGA8XFRAIRlkQFgA/KwAYPz8zEjk5Ly9fXl0rERIBFzkRMxEzETMRMzMxMAERIREzERQWMzIRETMRFAYjIiY1NSERIxEBVgJQpmp31aa7wLrN/bCmBEj+NQHL/T2JhQEMATn+vcrDxslz/hIESAAAAQB9/+wFmgXLABwAOkAfFggbAgIPHAgEHR4AHElZAAAFDAwTSVkMBAUZSVkFEwA/KwAYPysREgA5GC8rERIBFzkRMxEzMTABIRUQACEgABE0EiQzMhYXByYmIyAAERAAMyARIQNmAjT+zP7J/rv+k7MBVep47VNCWtZX/vX+3gEL9wG0/n8C8Fb+of6xAZEBYOUBVLUxJ5QmLv7F/uP+4/7DAdcAAAEAc//sBLAEXAAZADpAHxIHGAICDBkHBBobABlGWQAABAoKD0ZZChAEFUZZBBYAPysAGD8rERIAORgvKxESARc5ETMRMzEwASEVECEgABEQACEyFwcmIyIGFRQWMzI2NSECsgH+/f7+7v7XAUMBIdSvO6imzeXMxamv/qoCP0P98AEnARABDgErUINK3tLP36CdAAABABD/7AT0BbYAFAA5QB0FEwoNDQMTAAQVFgsLEAEQCElZEBMEAAEASVkBAwA/KxEAMxg/KxESADkYLxESARc5ETMRMzEwEzUhFSERFBYzMhERMxEUBiMiJjUREAQ8/i93cuio073GzQUdmZn8aIl7AQABz/4pwM3OwwOgAAABACn/7ASHBEYAFAA2QBwCEAcKCgAQEgQVFgESExJGWQgIDRMPDQVGWQ0WAD8rABg/EjkvKxEAMxESARc5ETMRMzEwASERFBYzMhERMxEUBiMiJjURITUhA4H+pm1216a9wMDJ/qgDWAO6/cmJgwEEAUH+vcrDy8QCP4wAAQBv/+wEWAXLACYAR0AmFSAMACQjBRsRIwAgBicoIw8SDxJKWQ8PHQMdGEpZHRMDCUpZAwQAPysAGD8rERIAORgvKxESADkREgEXOREzETMRMzEwEzQkMyAXByYmIyIGFRQWMzMVIyIGFRQWMzI3FQYhICQ1NDY3NSYmnAEI4QEC0V5ptWWMn9HI2dXe6Mq36cev/vv+9P7bz7yqtARcqcaQeEQ0e3KAk42Oio6NXJ5N3MWXwBYIGbL//wBa/+wDhwRcAgYBggAA//8AAP51BW
sFtgAmAbUAAAAHA4AD+gAA//8AEP51BHMESAImAdUAAAAHA4EDAgAA//8AAP6gBRAFvAImACQAAAAHAmcE6QAA//8AXv6gA80EWgImAEQAAAAHAmcEeQAA//8AAAAABRAH4QImACQAAAEHAmYE/AFSAAizAhMFJgArNf//AF7/7APNBo8CJgBEAAABBwJmBKYAAAAIswIpESYAKzX//wAAAAAFEAfRAiYAJAAAAQcDdwTlAVIACrQDAhUFJgArNTX//wBe/+wEQQZ/AiYARAAAAQcDdwSTAAAACrQDAisRJgArNTX//wAAAAAFEAfRAiYAJAAAAQcDeATdAVIACrQDAhUFJgArNTX//wAt/+wDzQZ/AiYARAAAAQcDeASTAAAACrQDAisRJgArNTX//wAAAAAFEAhKAiYAJAAAAQcDeQTZAVIACrQDAhUFJgArNTX//wBe/+wEFwb4AiYARAAAAQcDeQScAAAACrQDAisRJgArNTX//wAAAAAFEAhiAiYAJAAAAQcDegTlAVIACrQDAi0FJgArNTX//wBe/+wDzQcQAiYARAAAAQcDegSRAAAACrQDAkMRJgArNTX//wAA/qAFEAdzAiYAJAAAACcCZwTpAAABBwFLACsBUgAIswMpBSYAKzX//wBe/qADzQYhAiYARAAAACcCZwR5AAABBgFL1AAACLMDPhEmACs1//8AAAAABRAIEwImACQAAAEHA3sE7AFSAAq0AwIXBSYAKzU1//8AXv/sA80GwQImAEQAAAEHA3sEmgAAAAq0AwItESYAKzU1//8AAAAABRAIEwImACQAAAEHA3wE6QFSAAq0AwIXBSYAKzU1//8AXv/sA80GwQImAEQAAAEHA3wEmAAAAAq0AwItESYAKzU1//8AAAAABRAIWAImACQAAAEHA30E6QFSAAq0AwIhBSYAKzU1//8AXv/sA80HBgImAEQAAAEHA30EoAAAAAq0AwI3ESYAKzU1//8AAAAABRAIXgImACQAAAEHA34E4wFSAAq0AwInBSYAKzU1//8AXv/sA80HDAImAEQAAAEHA34EmAAAAAq0AwI9ESYAKzU1//8AAP6gBRAHSQImACQAAAAnAU4ALQFkAQcCZwTpAAAACLMCDwUmACs1//8AXv6gA80F5QImAEQAAAAmAU7YAAEHAmcEeQAAAAizAiURJgArNf//AMn+oAP4BbYCJgAoAAAABwJnBMEAAP//AHP+oAQSBFwCJgBIAAAABwJnBLgAAP//AMkAAAP4B+ECJgAoAAABBwJmBNEBUgAIswEQBSYAKzX//wBz/+wEEgaPAiYASAAAAQcCZgTJAAAACLMCHxEmACs1//8AyQAAA/gHLwImACgAAAEHAVL/5AFSAAizARUFJgArNf//AHP/7AQSBd0CJgBIAAABBgFS0AAACLMCJBEmACs1//8AyQAABG8H0QImACgAAAEHA3cEwQFSAAq0AgESBSYAKzU1//8Ac//sBFwGfwImAEgAAAEHA3cErgAAAAq0AwIhESYAKzU1//8AXQAAA/gH0QImACgAAAEHA3gEwwFSAAq0AgESBSYAKzU1//8ASv/sBBIGfwImAEgAAAEHA3gEsAAAAAq0AwIhESYAKzU1//8AyQAABDkISgImACgAAAEHA3kEvgFSAAq0AgESBSYAKzU1//8Ac//sBB0G+AImAEgAAAEHA3kEogAAAAq0AwIhESYAKzU1//8AyQAAA/gIYgImACgAAAEHA3oEuAFSAAq0AgEqBSYAKzU1//8Ac//sBBIHEAImAEgAAAEHA3oEogAAAAq0AwI5ESYAKzU1//8Ayf6gA/gHcwImACgAAAAnAmcEvgAAAQcBSwACAVIACLMCJQUmACs1//8Ac/6gBBIGIQImAEgAAAAnAmcEsAAAAQYBS/EAAAizAzQRJgArNf//AFQAAAJWB+ECJgAsAAABBwJmA8kBUgAIswEQBSYAKzX//wB7AAAB5gaPAiYA8wAAAQcCZgNzAAAACLMBCBEmACs1//8AVP6gAlYFtgImACwAAAAHAmcDtAAA//8Anf6gAWYF3wImAEwAAAAHAmcDYgAA//8Aff6gBb4FzQImADIAAAAHAmcFfwAA//8Ac/6gBGIEXAImAFIAAAAHAmcEyQAA//8Aff/sBb4H4QImADIAAAEHAmYFjwFSAAizAhwFJgArNf//AHP/7ARiBo8CJgBSAAABBwJmBNkAAAAIswIdESYAKzX//wB9/+wFvgfRAiYAMgAAAQcDdwV9AVIACrQDAh4FJgArNTX//wBz/+wEdQZ/AiYAUgAAAQcDdwTHAAAACrQDAh8RJgArNTX//wB9/+wFvgfRAiYAMgAAAQcDeAV9AVIACrQDAh4FJgArNTX//wBh/+wEYgZ/AiYAUgAAAQcDeATHAAAACrQDAh8RJgArNTX//wB9/+wFvghKAiYAMgAAAQcDeQV7AVIACrQDAh4FJgArNTX//wBz/+wEYgb4AiYAUgAAAQcDeQTHAAAACrQDAh8RJgArNTX//wB9/+wFvghiAiYAMgAAAQcDegV5AVIACrQDAjYFJgArNTX//wBz/+wEYgcQAiYAUgAAAQcDegTFAAAACrQDAjcRJgArNTX//wB9/qAFvgdzAiYAMgAAACcCZwV/AAABBwFLAMEBUgAIswMxBSYAKzX//wBz/qAEYgYhAiYAUgAAACcCZwTNAAABBgFLDgAACLMDMhEmACs1//8Aff/sBmQHcwImAl8AAAEHAHYBKwFSAAizAisFJgArNf//AHP/7AUZBiECJgJgAAABBgB2bQAACLMCKxEmACs1//8Aff/sBmQHcwImAl8AAAEHAEMAhwFSAAizAiMFJgArNf//AHP/7AUZBiECJgJgAAABBgBD1AAACLMCJBEmACs1//8Aff/sBmQH4QImAl8AAAEHAmYFjwFSAAizAiYFJgArNf//AHP/7AUZBo8CJgJgAAABBwJmBNkAAAAIswInESYAKzX//wB9/+wGZAcvAiYCXwAAAQcBUgCgAVIACLMCKwUmACs1//8Ac//sBRkF3QImAmAAAAEGAVL1AAAIswIjESYAKzX//wB9/qAGZAYUAiYCXwAAAAcCZwV7AAD//wBz/qAFGQTwAiYCYAAAAAcCZwTJAAD//wC6/qAFGQW2AiYAOAAAAAcCZwVKAAD//wCk/qAEOQRIAiYAWAAAAAcCZwS4AAD//wC6/+wFGQfhAiYAOAAAAQcCZgVUAVIACLMBFgUmACs1//8ApP/sBDkGjwImAFgAAAEHAmYE1QAAAAizARkRJgArNf//ALr/7AZ7B3MCJgJhAAABBwB2AO4BUgAIswElBSYAKzX//wCk/+wFlgYhAiYCYgAAAQYAdnkAAAizASYRJgArNf//ALr/7AZ7B3MCJgJhAAABBwBDAFoBUgAIswEdBSYAKzX//wCk/+wFlgYhAiYCYgAAAQYAQ7sAAAizAR8RJgArNf//ALr/7AZ7B+ECJgJhAAABBwJmBWABUgAIswEgBSYAKzX//wCk/+wFlgaPAiYCYgAAAQcCZ
gTbAAAACLMBIhEmACs1//8Auv/sBnsHLwImAmEAAAEHAVIAfwFSAAizASUFJgArNf//AKT/7AWWBd0CJgJiAAABBgFS/wAACLMBHhEmACs1//8Auv6gBnsGFAImAmEAAAAHAmcFTAAA//8ApP6gBZYE8gImAmIAAAAHAmcEsgAA//8AAP6gBHsFtgImADwAAAAHAmcEnAAA//8AAv4UBAYESAImAFwAAAAHAmcFnv/9//8AAAAABHsH4QImADwAAAEHAmYEqgFSAAizAQ0FJgArNf//AAL+FAQGBo8CJgBcAAABBwJmBGoAAAAIswEaESYAKzX//wAAAAAEewcvAiYAPAAAAQcBUv/CAVIACLMBEgUmACs1//8AAv4UBAYF3QImAFwAAAEGAVKKAAAIswEfESYAKzX//wBz/sUE0wYUAiYA0wAAAAcAQgC0AAAAAvvlBNn+tAYhAAkAEwAeQAwECg4OAAAVDwaACwEALzMazTIRATMRMxI5OTEwASMmJic1MxYWFwUjJiYnNTMWFhf+tGA0sSW6HGMx/pxgOK4luxxjMQTZKso/FT2uRBksyD8VPa5EAAAC/HEE2f+uBn8ADQAVAChAERUABhERFwMGChUKFQoRwAYBAC8zGsw5OS8vERI5EQEzETM5OTEwASMmJwYHIzU3NjczFhcnNjczFQYHI/7TXnBjcmFeNXA0sEKXUEk2rFN4YATZS1tlQRk8e01epsJbcBVuYAAAAvuaBNn+1wZ/AA0AFQAqQBIGDhERAAAXAwYKDwoPChPABgEALzMazDk5Ly8REjkRATMRMxI5OTEwASMmJwYHIzU3NjczFhclIyYnNTMWF/7XXmFyamleNXA0sEKX/e5feFSsNEsE2UFlYEYXPHtNXqasXnAVbGEAAvxxBNn/ewb4AA0AHwA0QBgQEwATGwMGBhYODiEDCgYSChIKGR7ABgEALzMazDI5OS8vERI5EQEzETMzEhc5ETMxMAEjJicGByM1NzY3MxYXExQHByMnNjY1NCYjIgc1NjMy/tNecGNyYV41cDSwQpeofwZQCjk/OSsuGhk3wwTZS1tlQRk8e01epgF7Zx1RgwkgJiUZBlAGAAL8aATZ/ucHEAAXACUAOkAbGB4JCRUVJxseIh4ZEQkABQwiAAwMACIDFcAZAC8azBc5Ly8vETMQxDMRMxESOREBMxEzEjk5MTABIi4CIyIGByM2NjMyHgIzMjY3MwYGEyMmJwYHIzU3NjczFhf+LSVHQz8cKCoOWw1lSyVJQz4bKCoMWgtjXl5hcmppXjVwNLBClwY1HiUeMTJqcR4kHjExaHP+pEFlYEYXPHtNXqYAAvx5BNn+xwbBAAcAFAAkQA8HBAoKEhIWA0AHEQqADggALzMa3TLUGs0RATMRMxI5OTEwATY3MxUGByMTIAMzFhYzMjY3MwYG/V5QMaxWd2A+/uwPZglMamJWCGkLlQX0aGUVcl3+/AEESDlBQHiMAAL8eQTZ/scGwQAHABQAJEAPBwQKChISFgRAAREKgA4IAC8zGt0y1BrNEQEzETMSOTkxMAEjJic1MxYXAyADMxYWMzI2NzMGBv3RXndWrDRLNf7sD2YJTGpiVghpC5UF3V1yFWxh/uUBBEg5QUB4jAAC/HkE2f7HBwYAEQAeAC5AFQgAAAUNAxQUHBwgCxAEBBgYGxSAEgAvGs0yMxE5L8QyEQEzETMSFzkRMzEwARQHByMnNjY1NCYjIgc1NjMyAyADMxYWMzI2NzMGBv4xfwZSCjlCOSwlJBY+wJX+7A9mCUxqYlYIaQuVBnlkHSlaCSAlJRoGTgj90wEESDlBQHiMAAL8aATZ/ucHDAAXACQAMEAVGiIJCRUmBQwMHh4YFUARCQAhGoAYAC8a3TLWxDMazREzETkvMxEBMzIROTkxMAEiLgIjIgYHIzY2MzIeAjMyNjczBgYDIAMzFhYzMjY3MwYG/i0lR0M/HCgqDlsNZEwlSUM+GygqDFoLY93+7A9mCUxqYlYIaQuVBjMeJB4wMmhxHiQeMTFncv6mAQRIOUFAeIwAAQAx/kIBbQAAAA8AGkALAAUFAgoDEBENCAMAL8wyERIBFzkRMzEwFzQnMxYVFAYjIic1FjMyNt+Le55mY0EyIDYlM+5nh3iEW2cQbAowAAABABn+dQFxAJoACwAYQAkKAAYADA0IAwAAL8wyERIBOTkRMzEwJREQIyInNRYzMjURAXHkODwpPV6a/t/+/BiME2QBMAAAAQAZ/nUBcQCPAAsAGEAJCgAGAAwNCAMAAC/MMhESATk5ETMxMCURECMiJzUWMzI1EQFx5Dg8KT1ej/7q/vwYjBNkASUA//8ANAAAAkMFtgAHABT/eAAAAAIAc//sBBcEcwALABcAKEAUDAYSAAYAGBkJFUtZCSYDD01ZAxkAPysAGD8rERIBOTkRMxEzMTABEAIjIgIREBIzMhIBFBYzMjY1NCYjIgYEF/fe2fb52tj5/QSbjo2eno+NmgIv/vX+yAE1AQ4BDwE1/sv+8dDo6s7M7OkAAAEALQAAAjcEXgAKACZAEQkBAQAIAAsMBwQHBAEJEAEYAD8/Ejk5Ly8REgE5OREzETMxMCEjETQ3BgcHJwEzAjehCEM+lloBf4sCMe+MQzBwcgEjAAEAKQAAA9cEcwAZACxAGAcTABMXDgEFGhsQCktZECYYFwEXTFkBGAA/KxEAMxg/KxESARc5ETMxMCEhNQE+AjU0JiMiBgcnNjMyFhUUBgcFFyED1/xSAZGdcSyLd1icXFrA8sbagrr+uQICvoUBL3doU0FXZz1KbaiolnO7gOcGAAABAF7+lQQbBHQAJwBHQCYDBBsAEwcHAAQWIg0GKCkEFxYXFktZFxcKJSUeS1klJgoRS1kKJQA/KwAYPysREgA5GC8rERIAORESARc5ETMRMxEzMTABFAYHFRYWFRQEISImJzUWFjMgERAhIzUzMjY1NCYjIgYHJzY2MzIWA+6dkLCq/t7+9XTBW1/XYAF7/l6QkqvIk35gqm1UWuuC1ewDB4yyHggWtJLR4SMsni8xASkBCo+Xhmt6NEZwR1HDAAACABf+qARmBF4ACgASAEJAIRIFCQICCwcDAAMFAxMUAQUSBU1ZCRIODw8HEhIDBxADJAA/PxI5LxI5ETMRMysRADMREgEXOREzMzMRMxEzMTAlIxEjESE1ATMRMyERNDcjBgcBBGbZqP0yAr642f6GDAopRP45G/6NAXN9A8b8RAFc2t5WXP2eAAABAIX+lQQdBF8AGgA6QB8PAxkUCBQXAwQcGwARS1kAAAYVFRhMWRUQBgxLWQYlAD8rABg/KxESADkYLysREgEXOREzETMxMAEyBBUUACMiJzUWFjMyNjUQISIHJxMhFSEDNgIt5wEJ/t/+94JG0GWww/6JXqBWNwLX/bclcwIm5cfj/v5PoC0zpp0BMh03AqyZ/kkXAP//AHX/7AQvBcsCBgAZAAAAAQBe/qkEKwRfAAYAH0AQAQUFAAIDBwgDAkxZAxAAJAA/PysREgEXOREzMTABASE1IRUB
AR0CXvzjA839qv6pBR2ZhfrP//8AaP/sBCkFywIGABsAAAACAGr+lQQlBHQAFwAlAEFAIhsRIgoKAAAEEQMmJw4eTVkKFA4OAhQUGEtZFCYCB01ZAiUAPysAGD8rERIAORgvEjkrERIBFzkRMxEzETMxMAEQISInNRYzMhITIwYGIyImNTQSMzIWEgEiBhUUFjMyNjY1NCYmBCX9aHREUGbw9QsMN7ZywuT/0JXfeP4Uj5yQk1uZWFKTAe/8phSPGgEpATNTV+jQ5AEImf7bATC4pJClSoBGabJmAP//AB0AAAXEBh8AJwBJArYAAAAGAEkAAAACAFwC3QWqBcEAIgAzAFpALiwwMC4qJiYoCgAcEQURFgAoLgY1NCsxJAMtLy0pLyMjKBwKFAgDAygpGRQUKQMAPzMvMxDNMi8zEjk5ETMRMxEzERIXORESARc5ETMRMxEzETMRMxEzMTABFAYjIic1FjMyNTQmJicmJjU0NjMyFwcmIyIGFRQWFhcWFgEDIxcRIxEzExMzESMRNyMDAkiVfJFKaneUFzZVeFGObn1cImRTPEsSK1+BUAGmyQgGd7zDy7R/BgjTA6xibSFsKGQhKCEfLFtMVmknYyUuKB0kHCQyWv7sAi+B/lIC0f3RAi/9LwGkif3T//8AEv4UBFoFtgImADcAAAAHAHoBPwAA//8AH/4UAqgFRgImAFcAAAAHAHoAxQAAAAIAcf4UBDcEXAAMACoAR0AmChUaAyoqHh4kFQMrLCEnRlkkIRscDxoPGBIYB0ZZGBASAEZZEhYAPysAGD8rERIAOTkYPz8zKxESARc5ETMRMzMRMzEwJTI2NzU0JiMiBhUUFgU0NyMGIyICERASMzIXMzczERQGIyInNRYWMzI2NQJMqpcEnquQmZcB2wkLcObZ7/PT33sLGIPs+fKVS9J2jqV3t8or4szg0NHZayRjpwEtAQoBCAExppL7pOzsRp4qLqmS//8Acf4UBDcGIQImA5EAAAEGAUsGAAAIswI5ESYAKzX//wBx/hQENwXlAiYDkQAAAQYBTgwAAAizAisRJgArNf//AHH+FAQ3Bd8CJgORAAABBwFPAVYAAAAIswI0ESYAKzX//wBx/hQENwYhAiYDkQAAAQYCOncAAAizAi8RJgArNQABAMkAAAFzBbYAAwARtgAEBQEDABIAPz8REgE5MTAzETMRyaoFtvpKAP//AAUAAAGOB3MCJgOWAAABBwBD/nwBUgAIswEFBSYAKzX//wCzAAACPAdzAiYDlgAAAQcAdv8qAVIACLMBDQUmACs1////xwAAAmkHcwImA5YAAAEHAUv+uwFSAAizARIFJgArNf//AAUAAAI4ByUCJgOWAAABBwBq/tABUgAKtAIBGQUmACs1Nf///6sAAAKTBy8CJgOWAAABBwFS/qMBUgAIswENBSYAKzX////zAAACSwa0AiYDlgAAAQcBTf7GAVIACLMBBwUmACs1////5wAAAlMHNwImA5YAAAEHAU7+wgFSAAizAQQFJgArNf//AFb+QgGiBbYCJgOWAAAABgFRMQD//wC7AAABfwcxAiYDlgAAAQcBTwAZAVIACLMBDQUmACs1//8Ayf5/A6MFtgAmA5YAAAAHAC0COwAA////5AAAAh0GCgAnA5YAqgAAAQcBVP3o/5cAB7IBCAAAPzUA//8AyQAAAXMFtgIGA5YAAP//AAUAAAI4ByUCJgOWAAABBwBq/tABUgAKtAIBGQUmACs1Nf//AMkAAAFzBbYCBgOWAAD//wAFAAACOAclAiYDlgAAAQcAav7QAVIACrQCARkFJgArNTX//wDJAAABcwW2AgYDlgAA//8AyQAAAXMFtgIGA5YAAP//AJkAAAIEB+ECJgOWAAABBwJmA5EBUgAIswEIBSYAKzX//wC4/qABfwW2AiYDlgAAAAcCZwN9AAAAAQAAA6oAigAWAFYABQACABAALwBcAAABDgD4AAMAAQAAAB8AHwAfAB8AUQB3AP8BewHsAmoCgwKuAtkDFQNBA18DdAOWA68D8QQaBFsEuQT7BUYFowXFBjQGkQbHBvsHGwdEB2QHuwhBCIAI2wkZCVUJigm4CggKOQpsCpQKwwrhCx8LVgucC9kMLAx5DMwM8A0kDUsNjw2/DeYOEg42Dk8Ocg6TDqkOyA8kD3kPtBAHEFQQlBEoEWYRlBHSEhASJxJ/ErkS+hNPE6MT1hQoFGgUpRTMFRcVRxWAFawV7hYGFksWhRaFFrYXARdTF6EX9RgaGJUYyxlHGZQZzxntGfUafxqVGs0a2RsTG2MbghvBG/EcExxFHGwcpRzdHPMdCB0eHXsdjB2dHa4dvx3RHd0eKx43HkgeWR5qHnwejR6eHq8ewR8ZHyofOx9MH10fbh+AH64gGSAqIDsgTCBeIG8gsSEYISghOCFIIVghaSF6IgUiESIhIjEiQSJSImMidCKFIpci/yMPIx8jLyM/I08jYCOmJAwkHCQsJDwkTSRdJLQkxSTWJOYk9yUHJRMlHyUwJUAlUSVhJXIlgyWUJaQltSXGJc4mOiZLJlsmbCZ8Jo0mniaqJrYmxybXJugm+CcJJxknKic7J0cnVydoJ3knySgiKDMoRChVKGYodyiIKJMoniivKMYo0ijeKO8pACkMKRcpTCldKW4peSmFKZYppimyKb4p+CotKj4qTipaKmUqdiqGKpcq3isnKzgrSCtZK2kreyuMK+8saSx6LIoslSyhLLIswyzULOQs9S0FLREtHS0uLT4tSS1ULWUtdS2yLgQuFS4lLjYuRi5XLmcueS6KLpwurS65LsUu1i7nLvgvCC8aLysvOy9ML10vbi9+L6Uv+DB3MRYxJzE4MUkxWTFkMW8xmDHBMdcx/zIfMlQyezK0MuYzBTNOM18zZzN4M4oznDOtM78z0DPjM+sz8zQSNBo0IjQqNDI0izSTNJs0wTTJNNE1BjUONTI1OjVxNXk1gTXoNfA2PDaQNqI2tDbENtQ25Db1Nwc3azfQOAY4ZzjFORI5TDmmOdI52josOjQ6XzrKOtI7EDtcO6g77TwlPF08uj0QPV89uT3LPdw97D38Pg0+Hz5vPoA+yj7SPto+7D70P1M/pj/lP/ZAB0A3QD9AhkCOQJZA30DnQSxBiUHBQdJCAUI8QkRCTEJUQlxCZEJsQnRCs0K7QsNC9EMrQ1tDlUPbRCNEYUSvRQ9FVkVeRbpGFUY0RnxGhEbKRyNHW0drR5tH0UgUSElIUUh1SH1IhUiqSLJJE0kbSUxJg0m0Se9KNEp9SrhLCEtlS6lLukwlTDVMg0yLTJNMpUytTQZNWE1gTXBNgE2xTdZN/U4OTh5OL05ATlJOZE51ToZOm06wTrhO2k73TxVPHU86T2lPmk+0T/JQWlB6UIpRJFEsUTRRV1F7UYdRoFHTUhhShlL4U25T1FQsVKBU9FT8VUtVYlV5VZBVp1YKVj5WY1aXVq5W0lcyV2JX41gsWD5YUFh9WIlYlVi8WONZAlkhWUBZdVm3WfxaTVpuWtNbJ1snWydbJ1snWydbJ1snWyd
bJ1snWydbJ1snXHFczFzdXOVdbF2nXgteHF4tXjleRV5XXoxew17TXuNfQF+XX+BgMWA6YENgTGB6YJlgqmC7YMtg22FOYZlh7WI7Ypti/mM/Y4Bj1mQsZI9k9GVpZeBmjGcwZzhnQGedZ/ZoL2hnaHloi2kBaQ1pgGnzap1rO2vRbDpsfWy/bQNtM21gbYZtrG6QbxtvgW/fcDFwgnDXcUNxe3G0cgZyVXKocvtzB3MTc1BzjHPNdBB0WHSsdOZ1HnVddaJ13XYddnN2xndCd7l3xXfReAJ4NHg8eG94rXjxeTB5cXmueex6MHpzer97C3tDe3p76HxLfMF9LX01fUZ9V32sffx+RH6Hfsx/FX9Vf5Z/2oAegG+AvYDFgNaA5oD4gQmBEYEZgSqBOoGLgdqB7IH9gg+CIYIzgkSCkILaguuC+4MNgx6DMINBg0mDUYNjg3SDhoOXg6iDuIPKg9uD7YP+hBCEIYRMhHeEiYSbhKeEsoS+hMqFEIVWhZSFnIX2hmSGyYcnh4GH1IgriHmIxIkTiWaJsInvii2KioqSip6Kqoq2isKK04rkivaLCIsaiyyLPotQi2KLdIuJi52Lr4vBi9OL5Yv3jAmMG4wtjEKMVoxijG6Mf4yQjKGMsYzDjNWM54z5jQuNHY0vjUGNVo1qjXuNjI2YjaSNsI28jc2N3o3wjgKOFI4mjjiOSo5cjm6Og46XjqiOuI7JjtmO6o77jwyPHI8ojzSPQI9Mj12Pbo9/j4+PoI+wj8GP0o/jj/OP/5ALkBeQI5A0kEWQVpBmkHKQppDhkR2RapHCkfqSMpJ7ks2S9ZMYkzuTRJODk62T7pROlJOU3pTmlQmVEZVulXqV9pYClg6WcZaBlpGWopaylseW2JbplvqXDJcdly6XP5dKl1uXZ5d5l4GXk5ebl62XtZe9l86X2gAAAAEAAAABGdsfPbV9Xw889QAJCAAAAAAAyTUxiwAAAADVK8zV+5r91QmiCGIAAAAJAAIAAAAAAAAEzQDBAAAAAAQUAAACFAAAAiMAmAM1AIUFKwAzBJMAgwaWAGgF1wBxAcUAhQJeAFICXgA9BGoAVgSTAGgB9gA/ApMAVAIhAJgC8AAUBJMAZgSTALwEkwBkBJMAXgSTACsEkwCFBJMAdQSTAF4EkwBoBJMAagIhAJgCIQA/BJMAaASTAHcEkwBoA28AGwcxAHkFEAAABS8AyQUMAH0F1QDJBHMAyQQhAMkF0wB9BecAyQKqAFQCI/9gBOkAyQQnAMkHOQDJBggAyQY7AH0E0QDJBjsAfQTyAMkEZABqBG0AEgXTALoEwwAAB2gAGwSeAAgEewAABJEAUgKiAKYC8AAXAqIAMwRWADEDlv/8BJ4BiQRzAF4E5wCwA88AcwTnAHMEfQBzArYAHQRiACcE6QCwAgYAogIG/5EEMwCwAgYAsAdxALAE6QCwBNUAcwTnALAE5wBzA0QAsAPRAGoC0wAfBOkApAQCAAAGOQAXBDEAJwQIAAIDvgBSAwgAPQRoAe4DCABIBJMAaAIUAAACIwCYBJMAvgSTAD8EkwB7BJMAHwRoAe4EIQB7BJ4BNQaoAGQC1QBGA/oAUgSTAGgCkwBUBqgAZAQA//oDbQB/BJMAaALHADECxwAhBJ4BiQT0ALAFPQBxAiEAmAHRACUCxwBMAwAAQgP6AFAGPQBLBj0ALgY9ABoDbwAzBRAAAAUQAAAFEAAABRAAAAUQAAAFEAAABvz//gUMAH0EcwDJBHMAyQRzAMkEcwDJAqoAPAKqAFQCqv//AqoAPAXHAC8GCADJBjsAfQY7AH0GOwB9BjsAfQY7AH0EkwCFBjsAfQXTALoF0wC6BdMAugXTALoEewAABOMAyQT6ALAEcwBeBHMAXgRzAF4EcwBeBHMAXgRzAF4G3QBeA88AcwR9AHMEfQBzBH0AcwR9AHMCBv/aAgYAqQIG/7MCBv/sBMUAcQTpALAE1QBzBNUAcwTVAHME1QBzBNUAcwSTAGgE1QBzBOkApATpAKQE6QCkBOkApAQIAAIE5wCwBAgAAgUQAAAEcwBeBRAAAARzAF4FEAAABHMAXgUMAH0DzwBzBQwAfQPPAHMFDAB9A88AcwUMAH0DzwBzBdUAyQTnAHMFxwAvBOcAcwRzAMkEfQBzBHMAyQR9AHMEcwDJBH0AcwRzAMkEfQBzBHMAyQR9AHMF0wB9BGIAJwXTAH0EYgAnBdMAfQRiACcF0wB9BGIAJwXnAMkE6QCwBecAAATpABQCqv/iAgb/kAKqACoCBv/aAqoAHgIG/8wCqgBUAgYANQKqAFQCBgCwBM0AVAQMAKICI/9gAgb/kQTpAMkEMwCwBCUAsAQnAMkCBgCjBCcAyQIGAFkEJwDJAgYAsAQnAMkCgwCwBC8AHQIX//wGCADJBOkAsAYIAMkE6QCwBggAyQTpALAFcwABBggAyQTpALAGOwB9BNUAcwY7AH0E1QBzBjsAfQTVAHMHYgB9B4kAcQTyAMkDRACwBPIAyQNEAGAE8gDJA0QAggRkAGoD0QBqBGQAagPRAGoEZABqA9EAagRkAGoD0QBqBG0AEgLTAB8EbQASAtMAHwRtABIC0wAfBdMAugTpAKQF0wC6BOkApAXTALoE6QCkBdMAugTpAKQF0wC6BOkApAXTALoE6QCkB2gAGwY5ABcEewAABAgAAgR7AAAEkQBSA74AUgSRAFIDvgBSBJEAUgO+AFICjwCwBJ4AwwUUAAAEcwBeBvz//gbdAF4GOwB9BNUAcwRkAGoD0QBqBLwBDAS8AQwEsgEtBLwBJQIGAKIEngFvAZMAJQS8AQgEngDnBJ4B/ASeARsFEAAAAiEAmATy/9QGff/UA5j/5AaB/+QFhf/UBoH/5AK2/+kFEAAABS8AyQQpAMkEkwAnBHMAyQSRAFIF5wDJBjsAfQKqAFQE6QDJBNMAAAc5AMkGCADJBG0ASAY7AH0F1QDJBNEAyQSJAEoEbQASBHsAAAZiAGoEngAIBl4AbQZCAFACqgA8BHsAAATjAHMDzQBaBOkAsAK2AKgE3wCkBOMAcwUGALAEGQAKBKQAcQPNAFoD3QBzBOkAsAS8AHMCtgCoBCUAsARG//IE9ACwBFYAAAPNAHEE1QBzBTMAGQTVAKYD2wBzBOcAcwPJABIE3wCkBb4AcwRe/+wGBgCkBi8AcwK2AAkE3wCkBNUAcwTfAKQGLwBzBHMAyQXfABIEKQDJBR0AfQRkAGoCqgBUAqoAPAIj/2AHbwAAB6AAyQXfABIE5QDJBPgAGwXVAMkFEAAABOcAyQUvAMkEKQDJBXcADgRzAMkGwQACBKYASgYZAMsGGQDLBOUAyQWiAAAHOQDJBecAyQY7AH0F1QDJBNEAyQUMAH0EbQASBPgAGwZiAGoEngAIBeUAyQWPAKoIQgDJCEQAyQWBABIG0wDJBSUAyQUKAD0IZgDJBRcAMwRzAF4ExQB3BI0AsANtALAEkwApBH0AcwXjAAQD3QBEBRIAsAUSALAEJwCwBJEAEAXhALAFEgCwBNUAcwT4ALAE5wCwA88Acw
O8ACkECAACBbgAcQQxACcFAgCwBN0AnAcfALAHLQCwBY8AKQYpALAEvACwA/AAOQamALAEcQAlBH0AcwTpABQDbQCwA/AAcwPRAGoCBgCiAgb/7AIG/5EGsgAQBxcAsATpABQEJwCwBAgAAgT4ALAENwDJA20AsAdoABsGOQAXB2gAGwY5ABcHaAAbBjkAFwR7AAAECAACBAAAUggAAFIIAABSA0r//AFcABkBXAAZAfYAPwFcABkCzQAZAs0AGQM9ABkEBAB7BBQAewMCAKQGRgCYCZ4AZAHFAIUDJQCFAm8AUgJvAFAD4wCYAQr+eQMnAG0EkwBiBJMARAYbAJoEuAA/BpgAjQQpAHcIJwDJBjUAJQZCAFAE9ABmBj0ARwY9ACAGPQBHBj0AagSmAGYEkwAnBekAyQUMAEwEkwBoBGQAJQWkAHcDEgAMBJMAYgSTAGgEkwBoBJMAaASqAG8EvAAdBLwAHQSeANsCBv+RBAABiQQAAXEEAAGBAscAJwLHABQCxwA7AscAKQLHADkCxwAzAscAIwQAAAAIAAAABAAAAAgAAAACqgAAAgAAAAFWAAAEeQAAAiEAAAGaAAAAzQAAAAAAAAAAAAAIAABUCAAAVAIG/5EBXAAZBPoACgSFAAAGuAASBzkAyQdxALAFEAAABHMAXgZS/t8CqgB1AzMAmAd1AB0HdQAdBj0AfQTfAHMGJQC6BVIApAAA/FMAAP0NAAD8GQAA/QgAAP07BHMAyQYZAMsEfQBzBRIAsAgXAIUGjQAABWYAFwUOABcHWgDJBeMAsAVtAAAEgwAKB14AyQYhALAFxQAUBSMADAfLAMkGxQCwBKgAPwPdABkGXgBtBgYApAY9AH0E1QBzBQIAAAQMAAAFAgAABAwAAAmsAH0IfQBzBo0AfQVCAHMH/gB9BncAcwffAF4GjQAABR0AfQPnAHME3wBqBHUAywSeAPgEngHfBJ4B4QfpACkHpgApBikAyQUlALAE5wAvBLwAFATjAMkE5wCwBDcALwNtABIFIwDJBDMAsAcfAAIGPQAEBKYASgPdAEQFSgDJBFwAsATpAMkERACwBOkALwQjABQFgwAQBOwAKQX4AMkFLwCwBoEAyQXjALAIiQDJBuwAsAY7AH0FHwBzBQwAfQPPAHMEbQAQA7wAKQR7AAAEAgAABHsAAAQCAAAE9AAIBFYAJwbXABAFvAApBYkAqgTfAJwFjwCqBM0AnAWPAMkErgCwBrQAPQVGADMGtAA9BUYAMwKqAFQGwQACBeMABAWDAMkEZACwBaYAAASTABAF0QDJBO4AsAX2AMkFOQCwBY8AqgTdAJwHOwDJBeMAsAKqAFQFEAAABHMAXgUQAAAEcwBeBvz//gbdAF4EcwDJBH0AcwXXAHUEeQBmBdcAdQR5AGYGwQACBeMABASmAEoD3QBEBKoASgPpABsGGQDLBRIAsAYZAMsFEgCwBjsAfQTVAHMGPQB9BNUAcwY9AH0E1QBzBQoAPQPwADkE+AAbBAgAAgT4ABsECAACBPgAGwQIAAIFjwCqBN0AnAQ3AMkDbQCwBtMAyQYpALAENwAvA20AEgT4AAgEUgAnBJ4ABgQxACcE5wCDBOcAcwcxAIMHKwBzBzsATgZqAFAFAABOBC8AUAfZAAAGzwAQCBkAyQdOALAGDAB9BR8AcwWuABAFLQApBKoAbwPNAFoFmgAABJEAEAUQAAAEcwBeBRAAAARzAF4FEAAABHMAXgUQAAAEcwAtBRAAAARzAF4FEAAABHMAXgUQAAAEcwBeBRAAAARzAF4FEAAABHMAXgUQAAAEcwBeBRAAAARzAF4FEAAABHMAXgRzAMkEfQBzBHMAyQR9AHMEcwDJBH0AcwRzAMkEfQBzBHMAXQR9AEoEcwDJBH0AcwRzAMkEfQBzBHMAyQR9AHMCqgBUAgYAewKqAFQCBgCdBjsAfQTVAHMGOwB9BNUAcwY7AH0E1QBzBjsAfQTVAGEGOwB9BNUAcwY7AH0E1QBzBjsAfQTVAHMGPQB9BN8AcwY9AH0E3wBzBj0AfQTfAHMGPQB9BN8AcwY9AH0E3wBzBdMAugTpAKQF0wC6BOkApAYlALoFUgCkBiUAugVSAKQGJQC6BVIApAYlALoFUgCkBiUAugVSAKQEewAABAgAAgR7AAAECAACBHsAAAQIAAIE5wBzAAD75QAA/HEAAPuaAAD8cQAA/GgAAPx5AAD8eQAA/HkAAPxoAaQAMQGkABkBpAAZAy0ANASJAHMC9AAtBBQAKQSTAF4EjwAXBJMAhQSTAHUEkwBeBJMAaASTAGoFbQAdBloAXARtABIC0wAfBOcAcQTnAHEE5wBxBOcAcQTnAHECOwDJAjsABQI7ALMCO//HAjsABQI7/6sCO//zAjv/5wI7AFYCOwC7BF4AyQLl/+QCOwDJAAUAyQAFAMkAyQCZALgAAAABAAAIjf2oAAAJrPua/nsJogABAAAAAAAAAAAAAAAAAAADowADBLYBkAAFAAAFmgUzAAABHwWaBTMAAAPRAGYB8QgCAgsGBgMFBAICBOAAAu9AACBbAAAAKAAAAAAxQVNDAEAAIP/9Bh/+FACECI0CWCAAAZ8AAAAABEgFtgAAACAAAwAAAAEAAwABAAAADAAEA3wAAADGAIAABgBGAEgASQB+AMsAzwEnATIBYQFjAX8BkgGhAbAB8AH/AhsCNwK8AscCyQLdAvMDAQMDAwkDDwMjA4kDigOMA5gDmQOhA6kDqgPOA9ID1gQNBE8EUARcBF8EhgSPBJEEvwTABM4EzwUTHgEePx6FHsceyh7xHvMe+R9NIAsgFSAeICIgJiAwIDMgOiA8IEQgcCB5IH8gpCCnIKwhBSETIRYhICEiISYhLiFeIgIiBiIPIhIiGiIeIisiSCJgImUlyvsE/v///f//AAAAIABJAEoAoADMANABKAEzAWIBZAGSAaABrwHwAfoCGAI3ArwCxgLJAtgC8wMAAwMDCQMPAyMDhAOKA4wDjgOZA5oDowOqA6sD0QPWBAAEDgRQBFEEXQRgBIgEkASSBMAEwQTPBNAeAB4+HoAeoB7IHsse8h70H00gACATIBcgICAmIDAgMiA5IDwgRCBwIHQgfyCjIKcgqyEFIRMhFiEgISIhJiEuIVsiAiIGIg8iESIaIh4iKyJIImAiZCXK+wD+///8////4wNN/+P/wgLL/8IAAP/CAi3/wv+wAL8AsgBh/0kAAAAA/5b+hf6E/nb/aP9j/2L/XQBn/0T90AAX/c/9zgAJ/c79zf/5/c3+gv5/AAD9mv4a/ZkAAP4M/gv9aP4J/ub+Cf7Y/gnkWOQY43rkfQAA5H3jDuR74w3iQuHv4e7h7eHq4eHh4OHb4drh0+HL4cjhmeF24XQAAOEY4QvhCeJu4P7g++D04MjgJeAi4BrgGeAS4A/gA9/n39DfzdxpAAADTwJTAAEAAAAAAAAAAAAAAAAAugAAAAAAAAAAAAAAAAAAAAAAvgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
JgAAAAAAAAArAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFIAAAAAAAADmwDrA5wA7QOdAO8DngDxA58A8wOgAUkBSgEkASUCaAGcAZ0BngGfAaADpAOlAaMBpAGlAaYBpwJpAmsB9gH3A6gDRgOpA3UCHAONAjQCNQJdAl5AR1taWVhVVFNSUVBPTk1MS0pJSEdGRURDQkFAPz49PDs6OTg3NjUxMC8uLSwoJyYlJCMiIR8YFBEQDw4NCwoJCAcGBQQDAgEALCCwAWBFsAMlIBFGYSNFI2FILSwgRRhoRC0sRSNGYLAgYSCwRmCwBCYjSEgtLEUjRiNhsCBgILAmYbAgYbAEJiNISC0sRSNGYLBAYSCwZmCwBCYjSEgtLEUjRiNhsEBgILAmYbBAYbAEJiNISC0sARAgPAA8LSwgRSMgsM1EIyC4AVpRWCMgsI1EI1kgsO1RWCMgsE1EI1kgsAQmUVgjILANRCNZISEtLCAgRRhoRCCwAWAgRbBGdmiKRWBELSwBsQsKQyNDZQotLACxCgtDI0MLLSwAsCgjcLEBKD4BsCgjcLECKEU6sQIACA0tLCBFsAMlRWFksFBRWEVEGyEhWS0sSbAOI0QtLCBFsABDYEQtLAGwBkOwB0NlCi0sIGmwQGGwAIsgsSzAioy4EABiYCsMZCNkYVxYsANhWS0sigNFioqHsBErsCkjRLApeuQYLSxFZbAsI0RFsCsjRC0sS1JYRUQbISFZLSxLUVhFRBshIVktLAGwBSUQIyCK9QCwAWAj7ewtLAGwBSUQIyCK9QCwAWEj7ewtLAGwBiUQ9QDt7C0ssAJDsAFSWCEhISEhG0YjRmCKikYjIEaKYIphuP+AYiMgECOKsQwMinBFYCCwAFBYsAFhuP+6ixuwRoxZsBBgaAE6WS0sIEWwAyVGUkuwE1FbWLACJUYgaGGwAyWwAyU/IyE4GyERWS0sIEWwAyVGUFiwAiVGIGhhsAMlsAMlPyMhOBshEVktLACwB0OwBkMLLSwhIQxkI2SLuEAAYi0sIbCAUVgMZCNki7ggAGIbsgBALytZsAJgLSwhsMBRWAxkI2SLuBVVYhuyAIAvK1mwAmAtLAxkI2SLuEAAYmAjIS0sS1NYirAEJUlkI0VpsECLYbCAYrAgYWqwDiNEIxCwDvYbISOKEhEgOS9ZLSxLU1ggsAMlSWRpILAFJrAGJUlkI2GwgGKwIGFqsA4jRLAEJhCwDvaKELAOI0SwDvawDiNEsA7tG4qwBCYREiA5IyA5Ly9ZLSxFI0VgI0VgI0VgI3ZoGLCAYiAtLLBIKy0sIEWwAFRYsEBEIEWwQGFEGyEhWS0sRbEwL0UjRWFgsAFgaUQtLEtRWLAvI3CwFCNCGyEhWS0sS1FYILADJUVpU1hEGyEhWRshIVktLEWwFEOwAGBjsAFgaUQtLLAvRUQtLEUjIEWKYEQtLEUjRWBELSxLI1FYuQAz/+CxNCAbszMANABZREQtLLAWQ1iwAyZFilhkZrAfYBtksCBgZiBYGyGwQFmwAWFZI1hlWbApI0QjELAp4BshISEhIVktLLACQ1RYS1MjS1FaWDgbISFZGyEhISFZLSywFkNYsAQlRWSwIGBmIFgbIbBAWbABYSNYG2VZsCkjRLAFJbAIJQggWAIbA1mwBCUQsAUlIEawBCUjQjywBCWwByUIsAclELAGJSBGsAQlsAFgI0I8IFgBGwBZsAQlELAFJbAp4LApIEVlRLAHJRCwBiWwKeCwBSWwCCUIIFgCGwNZsAUlsAMlQ0iwBCWwByUIsAYlsAMlsAFgQ0gbIVkhISEhISEhLSwCsAQlICBGsAQlI0KwBSUIsAMlRUghISEhLSwCsAMlILAEJQiwAiVDSCEhIS0sRSMgRRggsABQIFgjZSNZI2ggsEBQWCGwQFkjWGVZimBELSxLUyNLUVpYIEWKYEQbISFZLSxLVFggRYpgRBshIVktLEtTI0tRWlg4GyEhWS0ssAAhS1RYOBshIVktLLACQ1RYsEYrGyEhISFZLSywAkNUWLBHKxshISFZLSywAkNUWLBIKxshISEhWS0ssAJDVFiwSSsbISEhWS0sIIoII0tTiktRWlgjOBshIVktLACwAiVJsABTWCCwQDgRGyFZLSwBRiNGYCNGYSMgECBGimG4/4BiirFAQIpwRWBoOi0sIIojSWSKI1NYPBshWS0sS1JYfRt6WS0ssBIASwFLVEItLLECAEKxIwGIUbFAAYhTWli5EAAAIIhUWLICAQJDYEJZsSQBiFFYuSAAAECIVFiyAgICQ2BCsSQBiFRYsgIgAkNgQgBLAUtSWLICCAJDYEJZG7lAAACAiFRYsgIEAkNgQlm5QAAAgGO4AQCIVFiyAggCQ2BCWblAAAEAY7gCAIhUWLICEAJDYEJZsSYBiFFYuUAAAgBjuAQAiFRYsgJAAkNgQlm5QAAEAGO4CACIVFiyAoACQ2BCWVlZWVlZsQACQ1RYQAoFQAhACUAMAg0CG7EBAkNUWLIFQAi6AQAACQEAswwBDQEbsYACQ1JYsgVACLgBgLEJQBuyBUAIugGAAAkBQFm5QAAAgIhVuUAAAgBjuAQAiFVaWLMMAA0BG7MMAA0BWVlZQkJCQkItLEUYaCNLUVgjIEUgZLBAUFh8WWiKYFlELSywABawAiWwAiUBsAEjPgCwAiM+sQECBgywCiNlQrALI0IBsAEjPwCwAiM/sQECBgywBiNlQrAHI0KwARYBLSywgLACQ1CwAbACQ1RbWCEjELAgGskbihDtWS0ssFkrLSyKEOUtQJkJIUggVSABHlUfSANVHx4BDx4/Hq8eA01LJh9MSzMfS0YlHyY0EFUlMyRVGRP/HwcE/x8GA/8fSkkzH0lGJR8TMxJVBQEDVQQzA1UfAwEPAz8DrwMDR0YZH+tGASMzIlUcMxtVFjMVVREBD1UQMw9VDw9PDwIfD88PAg8P/w8CBgIBAFUBMwBVbwB/AK8A7wAEEAABgBYBBQG4AZCxVFMrK0u4B/9SS7AJUFuwAYiwJVOwAYiwQFFasAaIsABVWltYsQEBjlmFjY0AQh1LsDJTWLAgHVlLsGRTWLAQHbEWAEJZc3MrK15zdHUrKysrK3Qrc3QrKysrKysrKysrKysrc3QrKysYXgAAAAYUABcATgW2ABcAdQW2Bc0AAAAAAAAAAAAAAAAAAARIABQAkQAA/+wAAAAA/+wAAAAA/+wAAP4U/+wAAAW2ABP8lP/t/oX/6v6p/+wAGP68AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAIsAgQDdAJgAjwCOAJkAiACBAQ8AigAAAAAADQCiAAMAAQQJAAAAcgAAAAMAAQQJAAEAEgByAAMAAQQJAAIADgCEAAMAAQQJAAMANACSAAMA
AQQJAAQAIgDGAAMAAQQJAAUAGADoAAMAAQQJAAYAIAEAAAMAAQQJAAcApAEgAAMAAQQJAAgAKAHEAAMAAQQJAAsAOAHsAAMAAQQJAAwAXAIkAAMAAQQJAA0AXAKAAAMAAQQJAA4AVALcAEQAaQBnAGkAdABpAHoAZQBkACAAZABhAHQAYQAgAGMAbwBwAHkAcgBpAGcAaAB0ACAAqQAgADIAMAAxADAALQAyADAAMQAxACwAIABHAG8AbwBnAGwAZQAgAEMAbwByAHAAbwByAGEAdABpAG8AbgAuAE8AcABlAG4AIABTAGEAbgBzAFIAZQBnAHUAbABhAHIAMQAuADEAMAA7ADEAQQBTAEMAOwBPAHAAZQBuAFMAYQBuAHMALQBSAGUAZwB1AGwAYQByAE8AcABlAG4AIABTAGEAbgBzACAAUgBlAGcAdQBsAGEAcgBWAGUAcgBzAGkAbwBuACAAMQAuADEAMABPAHAAZQBuAFMAYQBuAHMALQBSAGUAZwB1AGwAYQByAE8AcABlAG4AIABTAGEAbgBzACAAaQBzACAAYQAgAHQAcgBhAGQAZQBtAGEAcgBrACAAbwBmACAARwBvAG8AZwBsAGUAIABhAG4AZAAgAG0AYQB5ACAAYgBlACAAcgBlAGcAaQBzAHQAZQByAGUAZAAgAGkAbgAgAGMAZQByAHQAYQBpAG4AIABqAHUAcgBpAHMAZABpAGMAdABpAG8AbgBzAC4AQQBzAGMAZQBuAGQAZQByACAAQwBvAHIAcABvAHIAYQB0AGkAbwBuAGgAdAB0AHAAOgAvAC8AdwB3AHcALgBhAHMAYwBlAG4AZABlAHIAYwBvAHIAcAAuAGMAbwBtAC8AaAB0AHQAcAA6AC8ALwB3AHcAdwAuAGEAcwBjAGUAbgBkAGUAcgBjAG8AcgBwAC4AYwBvAG0ALwB0AHkAcABlAGQAZQBzAGkAZwBuAGUAcgBzAC4AaAB0AG0AbABMAGkAYwBlAG4AcwBlAGQAIAB1AG4AZABlAHIAIAB0AGgAZQAgAEEAcABhAGMAaABlACAATABpAGMAZQBuAHMAZQAsACAAVgBlAHIAcwBpAG8AbgAgADIALgAwAGgAdAB0AHAAOgAvAC8AdwB3AHcALgBhAHAAYQBjAGgAZQAuAG8AcgBnAC8AbABpAGMAZQBuAHMAZQBzAC8ATABJAEMARQBOAFMARQAtADIALgAwAAAAAgAAAAAAAP9mAGYAAAAAAAAAAAAAAAAAAAAAAAAAAAOqAAABAgACAAMABAAFAAYABwAIAAkACgALAAwADQAOAA8AEAARABIAEwAUABUAFgAXABgAGQAaABsAHAAdAB4AHwAgACEAIgAjACQAJQAmACcAKAApACoAKwEDAC0ALgAvADAAMQAyADMANAA1ADYANwA4ADkAOgA7ADwAPQA+AD8AQABBAEIAQwBEAEUARgBHAEgASQBKAEsATABNAE4ATwBQAFEAUgBTAFQAVQBWAFcAWABZAFoAWwBcAF0AXgBfAGAAYQCsAKMAhACFAL0AlgDoAIYAjgCLAJ0AqQCkAQQAigEFAIMAkwDyAPMAjQCXAIgAwwDeAPEAngCqAPUA9AD2AKIArQDJAMcArgBiAGMAkABkAMsAZQDIAMoBBgEHAQgBCQDpAGYA0wDQANEArwBnAPAAkQDWANQA1QBoAOsA7QCJAGoAaQBrAG0AbABuAKAAbwBxAHAAcgBzAHUAdAB2AHcA6gB4AHoAeQB7AH0AfAC4AKEAfwB+AIAAgQDsAO4AugEKAQsBDAENAQ4BDwD9AP4BEAERARIBEwD/AQABFAEVARYBAQEXARgBGQEaARsBHAEdAR4BHwEgASEBIgD4APkBIwEkASUBJgEnASgBKQEqASsBLAEtAS4BLwEwATEBMgEzANcBNAE1ATYBNwE4ATkBOgE7ATwBPQE+AT8BQAFBAUIA4gDjAUMBRAFFAUYBRwFIAUkBSgFLAUwBTQFOAU8BUAFRALAAsQFSAVMBVAFVAVYBVwFYAVkBWgFbAPsA/ADkAOUBXAFdAV4BXwFgAWEBYgFjAWQBZQFmAWcBaAFpAWoBawFsAW0BbgFvAXABcQC7AXIBcwF0AXUA5gDnAXYApgF3AXgBeQF6AXsBfAF9AX4A2ADhANoA2wDcAN0A4ADZAN8BfwGAAYEBggGDAYQBhQGGAYcBiAGJAYoBiwGMAY0BjgGPAZABkQGSAZMBlAGVAZYBlwGYAZkBmgGbAZwBnQGeAZ8BoAGhAaIBowGkAaUBpgGnAagBqQGqAasBrAGtAa4BrwGwAbEBsgGzAbQBtQG2AbcAmwG4AbkBugG7AbwBvQG+Ab8BwAHBAcIBwwHEAcUBxgHHAcgByQHKAcsBzAHNAc4BzwHQAdEB0gHTAdQB1QHWAdcB2AHZAdoB2wHcAd0B3gHfAeAB4QHiAeMB5AHlAeYB5wHoAekB6gHrAewB7QHuAe8B8AHxAfIB8wH0AfUB9gH3AfgB+QH6AfsB/AH9Af4B/wIAAgECAgIDAgQCBQIGAgcCCAIJAgoCCwIMAg0CDgIPAhACEQISAhMCFAIVAhYCFwIYAhkCGgIbAhwCHQIeAh8CIAIhAiICIwIkAiUCJgInAigCKQIqAisAsgCzAiwCLQC2ALcAxAIuALQAtQDFAIIAwgCHAKsAxgIvAjAAvgC/AjEAvAIyAPcCMwI0AjUCNgI3AjgAjACfAjkCOgI7AjwCPQCYAKgAmgCZAO8ApQCSAJwApwCPAJQAlQC5Aj4CPwJAAkECQgJDAkQCRQJGAkcCSAJJAkoCSwJMAk0CTgJPAlACUQJSAlMCVAJVAlYCVwJYAlkCWgJbAlwCXQJeAl8CYAJhAmICYwJkAmUCZgJnAmgCaQJqAmsCbAJtAm4CbwJwAnECcgJzAnQCdQJ2AncCeAJ5AnoCewJ8An0CfgJ/AoACgQKCAoMChAKFAoYChwKIAokCigKLAowCjQKOAo8CkAKRApICkwKUApUClgKXApgCmQKaApsCnAKdAp4CnwKgAqECogKjAqQCpQKmAqcCqAKpAqoCqwKsAq0CrgKvArACsQKyArMCtAK1ArYCtwK4ArkCugK7ArwCvQK+Ar8CwALBAsICwwLEAsUCxgLHAsgCyQLKAssCzALNAs4CzwLQAtEC0gLTAtQC1QLWAtcC2ALZAtoC2wLcAt0C3gLfAuAC4QLiAuMC5ALlAuYC5wLoAukC6gLrAuwC7QLuAu8C8ALxAvIC8wL0AvUC9gL3AvgC+QL6AvsC/AL9Av4C/wMAAwEDAgMDAwQDBQMGAwcDCAMJAwoDCwMMAw0DDgMPAxADEQMSAxMDFAMVAxYDFwMYAxkDGgMbAxwDHQMeAx8DIAMhAyIDIwMkAyUDJgMnAygDKQMqAysDLAMtAy4DLwMwAzEDMgMzAzQDNQM2AzcDOAM5AzoDOwM8Az0DPgM/A0ADQQNCA0MDRANFA0YDRwNIA0kDSgNLA0wDTQNOA08DUANRA1IDUwNUA1UDVgNXA1gDWQNaA1sDXANdA14DXwN
gA2EDYgNjA2QDZQNmA2cDaANpA2oDawNsA20DbgNvA3ADcQNyA3MDdAN1A3YDdwN4A3kDegN7A3wDfQN+A38DgAOBA4IDgwOEA4UDhgOHA4gDiQOKA4sDjAONA44DjwOQA5EDkgOTA5QDlQOWA5cDmAOZA5oDmwOcA50DngOfACwAzwDMAM0AzgOgA6EDogOjAPoDpAOlA6YDpwOoA6kDqgOrA6wDrQRudWxsBUkuYWx0B3VuaTAwQUQJb3ZlcnNjb3JlCklncmF2ZS5hbHQKSWFjdXRlLmFsdA9JY2lyY3VtZmxleC5hbHQNSWRpZXJlc2lzLmFsdAdBbWFjcm9uB2FtYWNyb24GQWJyZXZlBmFicmV2ZQdBb2dvbmVrB2FvZ29uZWsLQ2NpcmN1bWZsZXgLY2NpcmN1bWZsZXgEQ2RvdARjZG90BkRjYXJvbgZkY2Fyb24GRGNyb2F0B0VtYWNyb24HZW1hY3JvbgZFYnJldmUGZWJyZXZlCkVkb3RhY2NlbnQKZWRvdGFjY2VudAdFb2dvbmVrB2VvZ29uZWsGRWNhcm9uBmVjYXJvbgtHY2lyY3VtZmxleAtnY2lyY3VtZmxleARHZG90BGdkb3QMR2NvbW1hYWNjZW50DGdjb21tYWFjY2VudAtIY2lyY3VtZmxleAtoY2lyY3VtZmxleARIYmFyBGhiYXIKSXRpbGRlLmFsdAZpdGlsZGULSW1hY3Jvbi5hbHQHaW1hY3JvbgpJYnJldmUuYWx0BmlicmV2ZQtJb2dvbmVrLmFsdAdpb2dvbmVrDklkb3RhY2NlbnQuYWx0BklKLmFsdAJpagtKY2lyY3VtZmxleAtqY2lyY3VtZmxleAxLY29tbWFhY2NlbnQMa2NvbW1hYWNjZW50DGtncmVlbmxhbmRpYwZMYWN1dGUGbGFjdXRlDExjb21tYWFjY2VudAxsY29tbWFhY2NlbnQGTGNhcm9uBmxjYXJvbgRMZG90BGxkb3QGTmFjdXRlBm5hY3V0ZQxOY29tbWFhY2NlbnQMbmNvbW1hYWNjZW50Bk5jYXJvbgZuY2Fyb24LbmFwb3N0cm9waGUDRW5nA2VuZwdPbWFjcm9uB29tYWNyb24GT2JyZXZlBm9icmV2ZQ1PaHVuZ2FydW1sYXV0DW9odW5nYXJ1bWxhdXQGUmFjdXRlBnJhY3V0ZQxSY29tbWFhY2NlbnQMcmNvbW1hYWNjZW50BlJjYXJvbgZyY2Fyb24GU2FjdXRlBnNhY3V0ZQtTY2lyY3VtZmxleAtzY2lyY3VtZmxleAxUY29tbWFhY2NlbnQMdGNvbW1hYWNjZW50BlRjYXJvbgZ0Y2Fyb24EVGJhcgR0YmFyBlV0aWxkZQZ1dGlsZGUHVW1hY3Jvbgd1bWFjcm9uBlVicmV2ZQZ1YnJldmUFVXJpbmcFdXJpbmcNVWh1bmdhcnVtbGF1dA11aHVuZ2FydW1sYXV0B1VvZ29uZWsHdW9nb25lawtXY2lyY3VtZmxleAt3Y2lyY3VtZmxleAtZY2lyY3VtZmxleAt5Y2lyY3VtZmxleAZaYWN1dGUGemFjdXRlClpkb3RhY2NlbnQKemRvdGFjY2VudAVsb25ncwpBcmluZ2FjdXRlCmFyaW5nYWN1dGUHQUVhY3V0ZQdhZWFjdXRlC09zbGFzaGFjdXRlC29zbGFzaGFjdXRlDFNjb21tYWFjY2VudAxzY29tbWFhY2NlbnQFdG9ub3MNZGllcmVzaXN0b25vcwpBbHBoYXRvbm9zCWFub3RlbGVpYQxFcHNpbG9udG9ub3MIRXRhdG9ub3MNSW90YXRvbm9zLmFsdAxPbWljcm9udG9ub3MMVXBzaWxvbnRvbm9zCk9tZWdhdG9ub3MRaW90YWRpZXJlc2lzdG9ub3MFQWxwaGEEQmV0YQVHYW1tYQd1bmkwMzk0B0Vwc2lsb24EWmV0YQNFdGEFVGhldGEISW90YS5hbHQFS2FwcGEGTGFtYmRhAk11Ak51AlhpB09taWNyb24CUGkDUmhvBVNpZ21hA1RhdQdVcHNpbG9uA1BoaQNDaGkDUHNpB3VuaTAzQTkQSW90YWRpZXJlc2lzLmFsdA9VcHNpbG9uZGllcmVzaXMKYWxwaGF0b25vcwxlcHNpbG9udG9ub3MIZXRhdG9ub3MJaW90YXRvbm9zFHVwc2lsb25kaWVyZXNpc3Rvbm9zBWFscGhhBGJldGEFZ2FtbWEFZGVsdGEHZXBzaWxvbgR6ZXRhA2V0YQV0aGV0YQRpb3RhBWthcHBhBmxhbWJkYQd1bmkwM0JDAm51AnhpB29taWNyb24DcmhvBnNpZ21hMQVzaWdtYQN0YXUHdXBzaWxvbgNwaGkDY2hpA3BzaQVvbWVnYQxpb3RhZGllcmVzaXMPdXBzaWxvbmRpZXJlc2lzDG9taWNyb250b25vcwx1cHNpbG9udG9ub3MKb21lZ2F0b25vcwlhZmlpMTAwMjMJYWZpaTEwMDUxCWFmaWkxMDA1MglhZmlpMTAwNTMJYWZpaTEwMDU0DWFmaWkxMDA1NS5hbHQNYWZpaTEwMDU2LmFsdAlhZmlpMTAwNTcJYWZpaTEwMDU4CWFmaWkxMDA1OQlhZmlpMTAwNjAJYWZpaTEwMDYxCWFmaWkxMDA2MglhZmlpMTAxNDUJYWZpaTEwMDE3CWFmaWkxMDAxOAlhZmlpMTAwMTkJYWZpaTEwMDIwCWFmaWkxMDAyMQlhZmlpMTAwMjIJYWZpaTEwMDI0CWFmaWkxMDAyNQlhZmlpMTAwMjYJYWZpaTEwMDI3CWFmaWkxMDAyOAlhZmlpMTAwMjkJYWZpaTEwMDMwCWFmaWkxMDAzMQlhZmlpMTAwMzIJYWZpaTEwMDMzCWFmaWkxMDAzNAlhZmlpMTAwMzUJYWZpaTEwMDM2CWFmaWkxMDAzNwlhZmlpMTAwMzgJYWZpaTEwMDM5CWFmaWkxMDA0MAlhZmlpMTAwNDEJYWZpaTEwMDQyCWFmaWkxMDA0MwlhZmlpMTAwNDQJYWZpaTEwMDQ1CWFmaWkxMDA0NglhZmlpMTAwNDcJYWZpaTEwMDQ4CWFmaWkxMDA0OQlhZmlpMTAwNjUJYWZpaTEwMDY2CWFmaWkxMDA2NwlhZmlpMTAwNjgJYWZpaTEwMDY5CWFmaWkxMDA3MAlhZmlpMTAwNzIJYWZpaTEwMDczCWFmaWkxMDA3NAlhZmlpMTAwNzUJYWZpaTEwMDc2CWFmaWkxMDA3NwlhZmlpMTAwNzgJYWZpaTEwMDc5CWFmaWkxMDA4MAlhZmlpMTAwODEJYWZpaTEwMDgyCWFmaWkxMDA4MwlhZmlpMTAwODQJYWZpaTEwMDg1CWFmaWkxMDA4NglhZmlpMTAwODcJYWZpaTEwMDg4CWFmaWkxMDA4OQlhZmlpMTAwOTAJYWZpaTEwMDkxCWFmaWkxMDA5MglhZmlpMTAwOTMJYWZpaTEwMDk0CWFmaWkxMDA5NQlhZmlpMTAwOTYJYWZpaTEwMDk3CWFmaWkxMDA3MQlhZm
lpMTAwOTkJYWZpaTEwMTAwCWFmaWkxMDEwMQlhZmlpMTAxMDIJYWZpaTEwMTAzCWFmaWkxMDEwNAlhZmlpMTAxMDUJYWZpaTEwMTA2CWFmaWkxMDEwNwlhZmlpMTAxMDgJYWZpaTEwMTA5CWFmaWkxMDExMAlhZmlpMTAxOTMJYWZpaTEwMDUwCWFmaWkxMDA5OAZXZ3JhdmUGd2dyYXZlBldhY3V0ZQZ3YWN1dGUJV2RpZXJlc2lzCXdkaWVyZXNpcwZZZ3JhdmUGeWdyYXZlCWFmaWkwMDIwOA11bmRlcnNjb3JlZGJsDXF1b3RlcmV2ZXJzZWQGbWludXRlBnNlY29uZAlleGNsYW1kYmwJbnN1cGVyaW9yCWFmaWkwODk0MQZwZXNldGEERXVybwlhZmlpNjEyNDgJYWZpaTYxMjg5CWFmaWk2MTM1Mgllc3RpbWF0ZWQJb25lZWlnaHRoDHRocmVlZWlnaHRocwtmaXZlZWlnaHRocwxzZXZlbmVpZ2h0aHMHdW5pRkIwMQd1bmlGQjAyDWN5cmlsbGljYnJldmUIZG90bGVzc2oQY2Fyb25jb21tYWFjY2VudAtjb21tYWFjY2VudBFjb21tYWFjY2VudHJvdGF0ZQx6ZXJvc3VwZXJpb3IMZm91cnN1cGVyaW9yDGZpdmVzdXBlcmlvcgtzaXhzdXBlcmlvcg1zZXZlbnN1cGVyaW9yDWVpZ2h0c3VwZXJpb3IMbmluZXN1cGVyaW9yB3VuaTIwMDAHdW5pMjAwMQd1bmkyMDAyB3VuaTIwMDMHdW5pMjAwNAd1bmkyMDA1B3VuaTIwMDYHdW5pMjAwNwd1bmkyMDA4B3VuaTIwMDkHdW5pMjAwQQd1bmkyMDBCB3VuaUZFRkYHdW5pRkZGQwd1bmlGRkZEB3VuaTAxRjAHdW5pMDJCQwd1bmkwM0QxB3VuaTAzRDIHdW5pMDNENgd1bmkxRTNFB3VuaTFFM0YHdW5pMUUwMAd1bmkxRTAxB3VuaTFGNEQHdW5pMDJGMwlkYXNpYW94aWEHdW5pRkIwMwd1bmlGQjA0BU9ob3JuBW9ob3JuBVVob3JuBXVob3JuB3VuaTAzMDAHdW5pMDMwMQd1bmkwMzAzBGhvb2sIZG90YmVsb3cHdW5pMDQwMAd1bmkwNDBEB3VuaTA0NTAHdW5pMDQ1RAd1bmkwNDYwB3VuaTA0NjEHdW5pMDQ2Mgd1bmkwNDYzB3VuaTA0NjQHdW5pMDQ2NQd1bmkwNDY2B3VuaTA0NjcHdW5pMDQ2OAd1bmkwNDY5B3VuaTA0NkEHdW5pMDQ2Qgd1bmkwNDZDB3VuaTA0NkQHdW5pMDQ2RQd1bmkwNDZGB3VuaTA0NzAHdW5pMDQ3MQd1bmkwNDcyB3VuaTA0NzMHdW5pMDQ3NAd1bmkwNDc1B3VuaTA0NzYHdW5pMDQ3Nwd1bmkwNDc4B3VuaTA0NzkHdW5pMDQ3QQd1bmkwNDdCB3VuaTA0N0MHdW5pMDQ3RAd1bmkwNDdFB3VuaTA0N0YHdW5pMDQ4MAd1bmkwNDgxB3VuaTA0ODIHdW5pMDQ4Mwd1bmkwNDg0B3VuaTA0ODUHdW5pMDQ4Ngd1bmkwNDg4B3VuaTA0ODkHdW5pMDQ4QQd1bmkwNDhCB3VuaTA0OEMHdW5pMDQ4RAd1bmkwNDhFB3VuaTA0OEYHdW5pMDQ5Mgd1bmkwNDkzB3VuaTA0OTQHdW5pMDQ5NQd1bmkwNDk2B3VuaTA0OTcHdW5pMDQ5OAd1bmkwNDk5B3VuaTA0OUEHdW5pMDQ5Qgd1bmkwNDlDB3VuaTA0OUQHdW5pMDQ5RQd1bmkwNDlGB3VuaTA0QTAHdW5pMDRBMQd1bmkwNEEyB3VuaTA0QTMHdW5pMDRBNAd1bmkwNEE1B3VuaTA0QTYHdW5pMDRBNwd1bmkwNEE4B3VuaTA0QTkHdW5pMDRBQQd1bmkwNEFCB3VuaTA0QUMHdW5pMDRBRAd1bmkwNEFFB3VuaTA0QUYHdW5pMDRCMAd1bmkwNEIxB3VuaTA0QjIHdW5pMDRCMwd1bmkwNEI0B3VuaTA0QjUHdW5pMDRCNgd1bmkwNEI3B3VuaTA0QjgHdW5pMDRCOQd1bmkwNEJBB3VuaTA0QkIHdW5pMDRCQwd1bmkwNEJEB3VuaTA0QkUHdW5pMDRCRgt1bmkwNEMwLmFsdAd1bmkwNEMxB3VuaTA0QzIHdW5pMDRDMwd1bmkwNEM0B3VuaTA0QzUHdW5pMDRDNgd1bmkwNEM3B3VuaTA0QzgHdW5pMDRDOQd1bmkwNENBB3VuaTA0Q0IHdW5pMDRDQwd1bmkwNENEB3VuaTA0Q0ULdW5pMDRDRi5hbHQHdW5pMDREMAd1bmkwNEQxB3VuaTA0RDIHdW5pMDREMwd1bmkwNEQ0B3VuaTA0RDUHdW5pMDRENgd1bmkwNEQ3B3VuaTA0RDgHdW5pMDREOQd1bmkwNERBB3VuaTA0REIHdW5pMDREQwd1bmkwNEREB3VuaTA0REUHdW5pMDRERgd1bmkwNEUwB3VuaTA0RTEHdW5pMDRFMgd1bmkwNEUzB3VuaTA0RTQHdW5pMDRFNQd1bmkwNEU2B3VuaTA0RTcHdW5pMDRFOAd1bmkwNEU5B3VuaTA0RUEHdW5pMDRFQgd1bmkwNEVDB3VuaTA0RUQHdW5pMDRFRQd1bmkwNEVGB3VuaTA0RjAHdW5pMDRGMQd1bmkwNEYyB3VuaTA0RjMHdW5pMDRGNAd1bmkwNEY1B3VuaTA0RjYHdW5pMDRGNwd1bmkwNEY4B3VuaTA0RjkHdW5pMDRGQQd1bmkwNEZCB3VuaTA0RkMHdW5pMDRGRAd1bmkwNEZFB3VuaTA0RkYHdW5pMDUwMAd1bmkwNTAxB3VuaTA1MDIHdW5pMDUwMwd1bmkwNTA0B3VuaTA1MDUHdW5pMDUwNgd1bmkwNTA3B3VuaTA1MDgHdW5pMDUwOQd1bmkwNTBBB3VuaTA1MEIHdW5pMDUwQwd1bmkwNTBEB3VuaTA1MEUHdW5pMDUwRgd1bmkwNTEwB3VuaTA1MTEHdW5pMDUxMgd1bmkwNTEzB3VuaTFFQTAHdW5pMUVBMQd1bmkxRUEyB3VuaTFFQTMHdW5pMUVBNAd1bmkxRUE1B3VuaTFFQTYHdW5pMUVBNwd1bmkxRUE4B3VuaTFFQTkHdW5pMUVBQQd1bmkxRUFCB3VuaTFFQUMHdW5pMUVBRAd1bmkxRUFFB3VuaTFFQUYHdW5pMUVCMAd1bmkxRUIxB3VuaTFFQjIHdW5pMUVCMwd1bmkxRUI0B3VuaTFFQjUHdW5pMUVCNgd1bmkxRUI3B3VuaTFFQjgHdW5pMUVCOQd1bmkxRUJBB3VuaTFFQkIHdW5pMUVCQwd1bmkxRUJEB3VuaTFFQkUHdW5pMUVCRgd1bmkxRUMwB3VuaTFFQzEHdW5pMUVDMgd1bmkxRUMzB3VuaTFFQzQHdW5pMUVDNQd1bmkxRUM2B3VuaTFFQzcLdW5pMUVDOC5hbHQHdW5pMUVDOQt1bmkxR
UNBLmFsdAd1bmkxRUNCB3VuaTFFQ0MHdW5pMUVDRAd1bmkxRUNFB3VuaTFFQ0YHdW5pMUVEMAd1bmkxRUQxB3VuaTFFRDIHdW5pMUVEMwd1bmkxRUQ0B3VuaTFFRDUHdW5pMUVENgd1bmkxRUQ3B3VuaTFFRDgHdW5pMUVEOQd1bmkxRURBB3VuaTFFREIHdW5pMUVEQwd1bmkxRUREB3VuaTFFREUHdW5pMUVERgd1bmkxRUUwB3VuaTFFRTEHdW5pMUVFMgd1bmkxRUUzB3VuaTFFRTQHdW5pMUVFNQd1bmkxRUU2B3VuaTFFRTcHdW5pMUVFOAd1bmkxRUU5B3VuaTFFRUEHdW5pMUVFQgd1bmkxRUVDB3VuaTFFRUQHdW5pMUVFRQd1bmkxRUVGB3VuaTFFRjAHdW5pMUVGMQd1bmkxRUY0B3VuaTFFRjUHdW5pMUVGNgd1bmkxRUY3B3VuaTFFRjgHdW5pMUVGOQd1bmkyMEFCB3VuaTAzMEYTY2lyY3VtZmxleGFjdXRlY29tYhNjaXJjdW1mbGV4Z3JhdmVjb21iEmNpcmN1bWZsZXhob29rY29tYhNjaXJjdW1mbGV4dGlsZGVjb21iDmJyZXZlYWN1dGVjb21iDmJyZXZlZ3JhdmVjb21iDWJyZXZlaG9va2NvbWIOYnJldmV0aWxkZWNvbWIQY3lyaWxsaWNob29rbGVmdBFjeXJpbGxpY2JpZ2hvb2tVQxFjeXJpbGxpY2JpZ2hvb2tMQwhvbmUucG51bQd6ZXJvLm9zBm9uZS5vcwZ0d28ub3MIdGhyZWUub3MHZm91ci5vcwdmaXZlLm9zBnNpeC5vcwhzZXZlbi5vcwhlaWdodC5vcwduaW5lLm9zAmZmB3VuaTIxMjAIVGNlZGlsbGEIdGNlZGlsbGEFZy5hbHQPZ2NpcmN1bWZsZXguYWx0CmdicmV2ZS5hbHQIZ2RvdC5hbHQQZ2NvbW1hYWNjZW50LmFsdAZJdGlsZGUHSW1hY3JvbgZJYnJldmUHSW9nb25lawJJSglJb3RhdG9ub3MESW90YQxJb3RhZGllcmVzaXMJYWZpaTEwMDU1CWFmaWkxMDA1Ngd1bmkwNEMwB3VuaTA0Q0YHdW5pMUVDOAd1bmkxRUNBAAABAAMACAAKAA0AB///AA8AAQAAAAwAAAAAAAAAAgAFAAACNQABAjcCNwABAjsCWwABAl0DdgABA4IDqQABAAAAAQAAAAoADAAOAAAAAAAAAAEAAAAKAG4BWgABbGF0bgAIABAAAk1PTCAAKFJPTSAAQgAA//8ACQADAAgACwAAAA4AEQAUABcAGgAA//8ACgAEAAYACQAMAAEADwASABUAGAAbAAD//wAKAAUABwAKAA0AAgAQABMAFgAZABwAHWxpZ2EAsGxpZ2EAsGxpZ2EAsGxudW0AtmxudW0AtmxudW0AtmxvY2wAvGxvY2wAvG9udW0Awm9udW0Awm9udW0AwnBudW0AynBudW0AynBudW0AynNhbHQA0HNhbHQA0HNhbHQA0HNzMDEA0HNzMDEA0HNzMDEA0HNzMDIA2HNzMDIA2HNzMDIA2HNzMDMA3nNzMDMA3nNzMDMA3nRudW0A5HRudW0A5HRudW0A5AAAAAEACQAAAAEABwAAAAEACAAAAAIAAgADAAAAAQAEAAAAAgAAAAEAAAABAAAAAAABAAEAAAACAAUABgAKABYAPAB8AJQAzADgAO4BAgEuAVAAAQAAAAEACAACABAABQORA5IDkwOUA5UAAQAFAEoA3wDhAOMA5QABAAAAAQAIAAIALgAUACwAjgCPAJAAkQDqAOwA7gDwAPIA9AFaAWcBdwGhAaICyQLYA0UDRwACAAEDlgOpAAAAAQAAAAEACAABAAYDcAACAAEAEwAcAAAAAQAAAAEACAACABoACgODA4UDhgOHA4gDiQOKA4sDjAOEAAIAAwATABMAAAAVABwAAQOCA4IACQABAAAAAQAIAAEABgNuAAEAAQAUAAEAAAABAAgAAQA8/JAAAQAAAAEACAABAAb8kgABAAEDggABAAAAAQAIAAIAGgAKABMDggAVABYAFwAYABkAGgAbABwAAgABA4MDjAAAAAEAAAABAAgAAgAOAAQDjwOQASABIQABAAQBJAElAUkBSgAEAAAAAQAIAAEANgABAAgABQAMABQAHAAiACgCXgADAEkATwJdAAMASQBMA40AAgBJAjUAAgBPAjQAAgBMAAEAAQBJAAA=") format("truetype");font-weight:400;font-style:normal}.w2ui-reset{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;-ms-box-sizing:border-box;-o-box-sizing:border-box;box-sizing:border-box;font-family:OpenSans;font-size:12px}.w2ui-reset *{color:default;line-height:100%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;-ms-box-sizing:border-box;-o-box-sizing:border-box;box-sizing:border-box;margin:0;padding:0}.w2ui-reset table{max-width:none;background-color:transparent;border-collapse:separate;border-spacing:0;border:none}.w2ui-reset table tr td,.w2ui-reset table tr th{font-family:OpenSans;font-size:12px}.w2ui-reset input:not([type=button]):not([type=submit]):not([type=checkbox]):not([type=radio]),.w2ui-reset select,.w2ui-reset textarea{display:inline-block;width:auto;height:auto;vertical-align:baseline;padding:6px;margin:0;font-size:12px;background-color:#f8fafa;border:1px solid #e0e0e0}.w2ui-reset input:not([type=button]):not([type=submit]):not([type=checkbox]):not([type=radio]):focus,.w2ui-reset select:focus,.w2ui-reset textarea:focus{background-color:#fff}.w2ui-reset 
select{padding:5px;height:26px;font-size:12px}.w2ui-centered{position:absolute;left:0;right:0;top:0;bottom:0;display:flex;flex-wrap:wrap;align-items:center;justify-content:center;text-align:center;padding:10px}.w2ui-disabled,.w2ui-readonly{background-color:#f1f1f1;color:#777}div[contenteditable].w2ui-focus,input.w2ui-focus:not(button),select.w2ui-focus,textarea.w2ui-focus{outline-style:auto;outline-color:#72b2ff}div.w2ui-input:focus,select.w2ui-input:focus{outline-color:#72b2ff}input:not([type=button]):not([type=submit]).w2ui-input,textarea.w2ui-input{padding:6px;border:1px solid #e0e0e0;border-radius:3px;color:#000;background-color:#f8fafa;line-height:normal}input:not([type=button]):not([type=submit]).w2ui-input:focus,textarea.w2ui-input:focus{outline-color:#72b2ff;background-color:#fff}input:not([type=button]):not([type=submit]).w2ui-input:disabled,input:not([type=button]):not([type=submit]).w2ui-input[readonly],textarea.w2ui-input:disabled,textarea.w2ui-input[readonly]{background-color:#f1f1f1;color:#777}select.w2ui-input{color:#000;padding:0 20px 0 7px;line-height:1.8;border-radius:3px;border:1px solid #e0e0e0;-webkit-appearance:none;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAALCAQAAACnzwd+AAAAcklEQVR4AcXMsQFBQQDG4P9tAgC0gJYRQJZgKQMwCqCku6vVAAAA+NJHP4KHOk0aV2pRw61n4BBmyOxKQ8I4ehZeuhd3HTx6DQEGZ7sBfr2OOOOj3Yi43kMKs9sZknofOexqZ8npMygwWZTX51CipP+YA1OiZJbYYg9lAAAAAElFTkSuQmCC),linear-gradient(to bottom,#f8f8f8 20%,#f8f8f8 50%,#f8f8f8 52%,#f8f8f8 100%);background-size:17px 6px,100% 100%;background-position:right center,left top;background-repeat:no-repeat,no-repeat}.w2ui-icon-expand:before{position:relative;top:1px;left:1px;content:' ';width:5px;height:5px;border:2px solid rgba(150,150,150,.8);border-bottom:0;border-left:0;transform:rotateZ(45deg)}.w2ui-icon-collapse:before{position:relative;top:-1px;left:3px;content:' ';width:5px;height:5px;border:2px solid rgba(150,150,150,.8);border-bottom:0;border-left:0;transform:rotateZ(135deg)}input[type=checkbox].w2ui-toggle{position:absolute;opacity:0;width:46px;height:22px;padding:0;margin:0;margin-left:2px}input[type=checkbox].w2ui-toggle:focus{box-shadow:0 0 1px 2px #a8cfff}input[type=checkbox].w2ui-toggle+div{display:inline-block;width:46px;height:22px;border:1px solid #bbb;border-radius:30px;background-color:#eee;transition-duration:.3s;transition-property:background-color,box-shadow;box-shadow:inset 0 0 0 0 rgba(0,0,0,.4);margin-left:2px}input[type=checkbox].w2ui-toggle.w2ui-small+div{width:30px;height:16px}input[type=checkbox].w2ui-toggle:focus+div{box-shadow:0 0 3px 2px #91baed}input[type=checkbox].w2ui-toggle:disabled+div{opacity:.3}input[type=checkbox].w2ui-toggle+div>div{float:left;width:22px;height:22px;border-radius:inherit;background:#f5f5f5;transition-duration:.3s;transition-property:transform,background-color,box-shadow;box-shadow:0 0 1px #323232,0 0 0 1px rgba(200,200,200,.6);pointer-events:none;margin-top:-1px;margin-left:-1px}input[type=checkbox].w2ui-toggle.w2ui-small+div>div{width:16px;height:16px}input[type=checkbox].w2ui-toggle:checked+div>div{transform:translate3d(24px,0,0);background-color:#fff}input[type=checkbox].w2ui-toggle.w2ui-small:checked+div>div{transform:translate3d(14px,0,0)}input[type=checkbox].w2ui-toggle:focus{outline:0}input[type=checkbox].w2ui-toggle:checked+div{border:1px solid #206fad;box-shadow:inset 0 0 0 12px #35a6eb}input[type=checkbox].w2ui-toggle:checked:focus+div{box-shadow:0 0 3px 2px #91baed,inset 0 0 0 12px 
#35a6eb}input[type=checkbox].w2ui-toggle:checked+div>div{box-shadow:0 2px 5px rgba(0,0,0,.3),0 0 0 1px #206fad}input[type=checkbox].w2ui-toggle.green:checked+div{border:1px solid #00a23f;box-shadow:inset 0 0 0 12px #54b350}input[type=checkbox].w2ui-toggle.green:checked:focus+div{box-shadow:0 0 3px 2px #91baed,inset 0 0 0 12px #54b350}input[type=checkbox].w2ui-toggle.green:checked+div>div{box-shadow:0 2px 5px rgba(0,0,0,.3),0 0 0 1px #00a23f}.w2ui-marker{background-color:rgba(214,161,252,.5)}.w2ui-spinner{display:inline-block;background-size:100%;background-repeat:no-repeat;background-image:url(data:image/gif;base64,R0lGODlhgACAAKIAAP///93d3bu7u5mZmQAA/wAAAAAAAAAAACH/C05FVFNDQVBFMi4wAwEAAAAh+QQFBQAEACwCAAIAfAB8AAAD/0i63P4wygYqmDjrzbtflvWNZGliYXiubKuloivPLlzReD7al+7/Eh5wSFQIi8hHYBkwHUmD6CD5YTJLz49USuVYraRsZ7vtar7XnQ1Kjpoz6LRHvGlz35O4nEPP2O94EnpNc2sef1OBGIOFMId/inB6jSmPdpGScR19EoiYmZobnBCIiZ95k6KGGp6ni4wvqxilrqBfqo6skLW2YBmjDa28r6Eosp27w8Rov8ekycqoqUHODrTRvXsQwArC2NLF29UM19/LtxO5yJd4Au4CK7DUNxPebG4e7+8n8iv2WmQ66BtoYpo/dvfacBjIkITBE9DGlMvAsOIIZjIUAixliv9ixYZVtLUos5GjwI8gzc3iCGghypQqrbFsme8lwZgLZtIcYfNmTJ34WPTUZw5oRxdD9w0z6iOpO15MgTh1BTTJUKos39jE+o/KS64IFVmsFfYT0aU7capdy7at27dw48qdS7eu3bt480I02vUbX2F/JxYNDImw4GiGE/P9qbhxVpWOI/eFKtlNZbWXuzlmG1mv58+gQ4seTbq06dOoU6vGQZJy0FNlMcV+czhQ7SQmYd8eMhPs5BxVdfcGEtV3buDBXQ+fURxx8oM6MT9P+Fh6dOrH2zavc13u9JXVJb520Vp8dvC76wXMuN5Sepm/1WtkEZHDefnzR9Qvsd9+/wi8+en3X0ntYVcSdAE+UN4zs7ln24CaLagghIxBaGF8kFGoIYV+Ybghh841GIyI5ICIFoklJsigihmimJOLEbLYIYwxSgigiZ+8l2KB+Ml4oo/w8dijjcrouCORKwIpnJIjMnkkksalNeR4fuBIm5UEYImhIlsGCeWNNJphpJdSTlkml1jWeOY6TnaRpppUctcmFW9mGSaZceYopH9zkjnjUe59iR5pdapWaGqHopboaYua1qije67GJ6CuJAAAIfkEBQUABAAsCgACAFcAMAAAA/9Iutz+ML5Ag7w46z0r5WAoSp43nihXVmnrdusrv+s332dt4Tyo9yOBUJD6oQBIQGs4RBlHySSKyczVTtHoidocPUNZaZAr9F5FYbGI3PWdQWn1mi36buLKFJvojsHjLnshdhl4L4IqbxqGh4gahBJ4eY1kiX6LgDN7fBmQEJI4jhieD4yhdJ2KkZk8oiSqEaatqBekDLKztBG2CqBACq4wJRi4PZu1sA2+v8C6EJexrBAD1AOBzsLE0g/V1UvYR9sN3eR6lTLi4+TlY1wz6Qzr8u1t6FkY8vNzZTxaGfn6mAkEGFDgL4LrDDJDyE4hEIbdHB6ESE1iD4oVLfLAqPETIsOODwmCDJlv5MSGJklaS6khAQAh+QQFBQAEACwfAAIAVwAwAAAD/0i63P5LSAGrvTjrNuf+YKh1nWieIumhbFupkivPBEzR+GnnfLj3ooFwwPqdAshAazhEGUXJJIrJ1MGOUamJ2jQ9QVltkCv0XqFh5IncBX01afGYnDqD40u2z76JK/N0bnxweC5sRB9vF34zh4gjg4uMjXobihWTlJUZlw9+fzSHlpGYhTminKSepqebF50NmTyor6qxrLO0L7YLn0ALuhCwCrJAjrUqkrjGrsIkGMW/BMEPJcphLgDaABjUKNEh29vdgTLLIOLpF80s5xrp8ORVONgi8PcZ8zlRJvf40tL8/QPYQ+BAgjgMxkPIQ6E6hgkdjoNIQ+JEijMsasNY0RQix4gKP+YIKXKkwJIFF6JMudFEAgAh+QQFBQAEACw8AAIAQgBCAAAD/kg0PPowykmrna3dzXvNmSeOFqiRaGoyaTuujitv8Gx/661HtSv8gt2jlwIChYtc0XjcEUnMpu4pikpv1I71astytkGh9wJGJk3QrXlcKa+VWjeSPZHP4Rtw+I2OW81DeBZ2fCB+UYCBfWRqiQp0CnqOj4J1jZOQkpOUIYx/m4oxg5cuAaYBO4Qop6c6pKusrDevIrG2rkwptrupXB67vKAbwMHCFcTFxhLIt8oUzLHOE9Cy0hHUrdbX2KjaENzey9Dh08jkz8Tnx83q66bt8PHy8/T19vf4+fr6AP3+/wADAjQmsKDBf6AOKjS4aaHDgZMeSgTQcKLDhBYPEswoA1BBAgAh+QQFBQAEACxOAAoAMABXAAAD7Ei6vPOjyUkrhdDqfXHm4OZ9YSmNpKmiqVqykbuysgvX5o2HcLxzup8oKLQQix0UcqhcVo5ORi+aHFEn02sDeuWqBGCBkbYLh5/NmnldxajX7LbPBK+PH7K6narfO/t+SIBwfINmUYaHf4lghYyOhlqJWgqDlAuAlwyBmpVnnaChoqOkpaanqKmqKgGtrq+wsbA1srW2ry63urasu764Jr/CAb3Du7nGt7TJsqvOz9DR0tPU1TIA2ACl2dyi3N/aneDf4uPklObj6OngWuzt7u/d8fLY9PXr9eFX+vv8+PnYlUsXiqC3c6PmUUgAACH5BAUFAAQALE4AHwAwAFcAAAPpSLrc/m7IAau9bU7MO9GgJ0ZgOI5leoqpumKt+1axPJO1dtO5vuM9yi8TlAyBvSMxqES2mo8cFFKb8kzWqzDL7Xq/4LB4TC6bz1yBes1uu9uzt3zOXtHv8xN+Dx/x/wJ6gHt2g3Rxhm9oi4yNjo+QkZKTCgGWAWaXmmOanZhgnp2goaJdpKGmp55cqqusrZuvsJays6mzn1m4uRAAvgAvuBW/v8GwvcTFxqfIycA3zA/OytCl0tPPO7HD2GLYvt7dYd/ZX99j5+Pi6tPh6+bvXuTuzujxXens9fr7YPn+7egRI9PPHrgpCQAAIfkEBQUABAAsPAA8AEIAQgAAA/lIutz+UI1Jq7026h2x/xUncmD5
jehjrlnqSmz8vrE8u7V5z/m5/8CgcEgsGo/IpHLJbDqf0Kh0ShBYBdTXdZsdbb/Yrgb8FUfIYLMDTVYz2G13FV6Wz+lX+x0fdvPzdn9WeoJGAYcBN39EiIiKeEONjTt0kZKHQGyWl4mZdREAoQAcnJhBXBqioqSlT6qqG6WmTK+rsa1NtaGsuEu6o7yXubojsrTEIsa+yMm9SL8osp3PzM2cStDRykfZ2tfUtS/bRd3ewtzV5pLo4eLjQuUp70Hx8t9E9eqO5Oku5/ztdkxi90qPg3x2EMpR6IahGocPCxp8AGtigwQAIfkEBQUABAAsHwBOAFcAMAAAA/9Iutz+MMo36pg4682J/V0ojs1nXmSqSqe5vrDXunEdzq2ta3i+/5DeCUh0CGnF5BGULC4tTeUTFQVONYAs4CfoCkZPjFar83rBx8l4XDObSUL1Ott2d1U4yZwcs5/xSBB7dBMBhgEYfncrTBGDW4WHhomKUY+QEZKSE4qLRY8YmoeUfkmXoaKInJ2fgxmpqqulQKCvqRqsP7WooriVO7u8mhu5NacasMTFMMHCm8qzzM2RvdDRK9PUwxzLKdnaz9y/Kt8SyR3dIuXmtyHpHMcd5+jvWK4i8/TXHff47SLjQvQLkU+fG29rUhQ06IkEG4X/Rryp4mwUxSgLL/7IqFETB8eONT6ChCFy5ItqJomES6kgAQAh+QQFBQAEACwKAE4AVwAwAAAD/0i63A4QuEmrvTi3yLX/4MeNUmieITmibEuppCu3sDrfYG3jPKbHveDktxIaF8TOcZmMLI9NyBPanFKJp4A2IBx4B5lkdqvtfb8+HYpMxp3Pl1qLvXW/vWkli16/3dFxTi58ZRcChwIYf3hWBIRchoiHiotWj5AVkpIXi4xLjxiaiJR/T5ehoomcnZ+EGamqq6VGoK+pGqxCtaiiuJVBu7yaHrk4pxqwxMUzwcKbyrPMzZG90NGDrh/JH8t72dq3IN1jfCHb3L/e5ebh4ukmxyDn6O8g08jt7tf26ybz+m/W9GNXzUQ9fm1Q/APoSWAhhfkMAmpEbRhFKwsvCsmosRIHx444PoKcIXKkjIImjTzjkQAAIfkEBQUABAAsAgA8AEIAQgAAA/VIBNz+8KlJq72Yxs1d/uDVjVxogmQqnaylvkArT7A63/V47/m2/8CgcEgsGo/IpHLJbDqf0Kh0Sj0FroGqDMvVmrjgrDcTBo8v5fCZki6vCW33Oq4+0832O/at3+f7fICBdzsChgJGeoWHhkV0P4yMRG1BkYeOeECWl5hXQ5uNIAOjA1KgiKKko1CnqBmqqk+nIbCkTq20taVNs7m1vKAnurtLvb6wTMbHsUq4wrrFwSzDzcrLtknW16tI2tvERt6pv0fi48jh5h/U6Zs77EXSN/BE8jP09ZFA+PmhP/xvJgAMSGBgQINvEK5ReIZhQ3QEMTBLAAAh+QQFBQAEACwCAB8AMABXAAAD50i6DA4syklre87qTbHn4OaNYSmNqKmiqVqyrcvBsazRpH3jmC7yD98OCBF2iEXjBKmsAJsWHDQKmw571l8my+16v+CweEwum8+hgHrNbrvbtrd8znbR73MVfg838f8BeoB7doN0cYZvaIuMjY6PkJGSk2gClgJml5pjmp2YYJ6dX6GeXaShWaeoVqqlU62ir7CXqbOWrLafsrNctjIDwAMWvC7BwRWtNsbGFKc+y8fNsTrQ0dK3QtXAYtrCYd3eYN3c49/a5NVj5eLn5u3s6e7x8NDo9fbL+Mzy9/T5+tvUzdN3Zp+GBAAh+QQJBQAEACwCAAIAfAB8AAAD/0i63P4wykmrvTjrzbv/YCiOZGmeaKqubOu+cCzPdArcQK2TOL7/nl4PSMwIfcUk5YhUOh3M5nNKiOaoWCuWqt1Ou16l9RpOgsvEMdocXbOZ7nQ7DjzTaeq7zq6P5fszfIASAYUBIYKDDoaGIImKC4ySH3OQEJKYHZWWi5iZG0ecEZ6eHEOio6SfqCaqpaytrpOwJLKztCO2jLi1uoW8Ir6/wCHCxMG2x7muysukzb230M6H09bX2Nna29zd3t/g4cAC5OXm5+jn3Ons7eba7vHt2fL16tj2+QL0+vXw/e7WAUwnrqDBgwgTKlzIsKHDh2gGSBwAccHEixAvaqTYcFCjRoYeNyoM6REhyZIHT4o0qPIjy5YTTcKUmHImx5cwE85cmJPnSYckK66sSAAj0aNIkypdyrSp06dQo0qdSrWq1atYs2rdyrWr169gwxZJAAA7)}.w2ui-icon{background-repeat:no-repeat;height:16px;width:16px;overflow:hidden;margin:2px 2px;display:inline-block}.w2ui-icon.icon-folder{background:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAGrSURBVDjLxZO7ihRBFIa/6u0ZW7GHBUV0UQQTZzd3QdhMQxOfwMRXEANBMNQX0MzAzFAwEzHwARbNFDdwEd31Mj3X7a6uOr9BtzNjYjKBJ6nicP7v3KqcJFaxhBVtZUAK8OHlld2st7Xl3DJPVONP+zEUV4HqL5UDYHr5xvuQAjgl/Qs7TzvOOVAjxjlC+ePSwe6DfbVegLVuT4r14eTr6zvA8xSAoBLzx6pvj4l+DZIezuVkG9fY2H7YRQIMZIBwycmzH1/s3F8AapfIPNF3kQk7+kw9PWBy+IZOdg5Ug3mkAATy/t0usovzGeCUWTjCz0B+Sj0ekfdvkZ3abBv+U4GaCtJ1iEm6ANQJ6fEzrG/engcKw/wXQvEKxSEKQxRGKE7Izt+DSiwBJMUSm71rguMYhQKrBygOIRStf4TiFFRBvbRGKiQLWP29yRSHKBTtfdBmHs0BUpgvtgF4yRFR+NUKi0XZcYjCeCG2smkzLAHkbRBmP0/Uk26O5YnUActBp1GsAI+S5nRJJJal5K1aAMrq0d6Tm9uI6zjyf75dAe6tx/SsWeD//o2/Ab6IH3/h25pOAAAAAElFTkSuQmCC) no-repeat 
center}.w2ui-icon.icon-page{background:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAINSURBVBgZBcG/r55zGAfg6/4+z3va01NHlYgzEfE7MdCIGISFgS4Gk8ViYyM2Mdlsko4GSf8Do0FLRCIkghhYJA3aVBtEz3nP89wf11VJvPDepdd390+8Nso5nESBQoq0pfvXm9fzWf19453LF85vASqJlz748vInb517dIw6EyYBIIG49u+xi9/c9MdvR//99MPPZ7+4cP4IZhhTPbwzT2d+vGoaVRRp1rRliVvHq+cfvM3TD82+7mun0o/ceO7NT+/4/KOXjwZU1ekk0840bAZzMQ2mooqh0A72d5x/6sB9D5zYnff3PoYBoWBgFKPKqDKqjCpjKr//dcu9p489dra88cydps30KswACfNEKanSaxhlntjJ8Mv12Paie+vZ+0+oeSwwQ0Iw1xAR1CiFNJkGO4wu3ZMY1AAzBI0qSgmCNJsJUEOtJSMaCTBDLyQ0CknAGOgyTyFFiLI2awMzdEcSQgSAAKVUmAeNkxvWJWCGtVlDmgYQ0GFtgg4pNtOwbBcwQy/Rife/2yrRRVI0qYCEBly8Z+P4qMEMy7JaVw72N568e+iwhrXoECQkfH91kY7jwwXMsBx1L93ZruqrK6uuiAIdSnTIKKPLPFcvay8ww/Hh+ufeznTXu49v95IMoQG3784gYXdTqvRmqn/Wpa/ADFX58MW3L71SVU9ETgEIQQQIOOzub+fhIvwPRDgeVjWDahIAAAAASUVORK5CYII=) no-repeat center}.w2ui-lock{display:none;position:absolute;z-index:1400;top:0;left:0;width:100%;height:100%;opacity:.15;background-color:#333}.w2ui-lock-msg{display:none;position:absolute;z-index:1400;top:50%;left:50%;transform:translateX(-50%) translateY(-50%);min-width:100px;max-width:95%;padding:30px;white-space:nowrap;text-overflow:ellipsis;overflow:hidden;font-size:13px;font-family:OpenSans;opacity:.8;background-color:#555;color:#fff;text-align:center;border-radius:5px;border:2px solid #444}.w2ui-lock-msg .w2ui-spinner{display:inline-block;width:24px;height:24px;margin:-3px 8px -7px -10px}.w2ui-scroll-wrapper{overflow:hidden}.w2ui-scroll-left,.w2ui-scroll-right{top:0;width:18px;height:100%;cursor:default;z-index:10;display:none;position:absolute}.w2ui-scroll-left:hover,.w2ui-scroll-right:hover{background-color:#ddd}.w2ui-scroll-left{left:0;box-shadow:0 0 7px #5f5f5f;background:#f7f7f7 url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAQAAADZc7J/AAAAzklEQVR4Ae2THRDEMBCFzy1ucatb3eJ2uhi3uNUtbnGrW9zi1rOdNzdvdl7nDpvYt/9/r7+/51myZZf/zXkD2iMHHRSb0x3oskwMieK05PwEXqP4ExSL0wp0ROao2OOuMPOMdUL6XU1/oGLcFWb+NqyTd2W/P/2qTr9h+nFXhOkHXRHiNyjrgp/U/V+WaQcaNY13zZI0A1JvcVqAnrGDTdtDtZUHjHIJhxxVLN0iqXgCP1l/7h8U9kc6abyJ4/eNWPpGdBv+XdUK0K8cnvcBly2rDr7C1HQAAAAASUVORK5CYII=) center center no-repeat;background-size:15px 12px}.w2ui-scroll-right{right:0;box-shadow:0 0 7px #5f5f5f;background:#f7f7f7 url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAQAAADZc7J/AAAAz0lEQVR4Ae2UG7TGMBCEr1vd4la3uMUtuli3utWtbnGLW9zi9l/bDMzJG7u12cfJfLunf1+UEC9Bv0vVQwJ8hjRCaZafflb1C9RQf4OD0gSDE+i+PiJAabFhQc1y1AYYsJGLY3lgxM17uWPO56yPiFDqVPWgRtpIHSd1zPnwkBsdI58OlNwx4fP2X0TgfMTOoHSdKOXkpyNvEyQh7ul+4swxJSTQuwNDxz68l/ukVNbu0Neen5Z+KvzWxBAqHds349uPFJ/jVOrPjxUq++OLf+20q5+noXo0AAAAAElFTkSuQmCC) center center no-repeat;background-size:15px 13px}#w2ui-notify{position:absolute;display:flex;flex-direction:column;align-items:center;left:0;right:0;bottom:15px;z-index:10000;overflow:hidden}#w2ui-notify>div{position:relative;background-color:#292828ba;color:#fff;padding:8px 44px 8px 16px;border-radius:4px;box-shadow:3px 3px 10px #9c9c9c;max-height:76px;min-width:100px;max-width:800px;font-size:16px;text-shadow:1px 0 0 #000}#w2ui-notify>div a{color:#6cd0e8;text-decoration:none;cursor:pointer}#w2ui-notify>div a:hover{color:#a2f0ff}#w2ui-notify>div span.w2ui-notify-close{padding:6px 6px;border-radius:3px;font-size:13px;color:#c3c3c3;position:absolute;right:5px;top:5px}#w2ui-notify>div 
span.w2ui-notify-close:hover{background-color:#807e7e;color:#fff}#w2ui-notify>div.w2ui-notify-error{text-shadow:none;background-color:rgba(255,0,0,.8)}#w2ui-notify>div.w2ui-notify-error .w2ui-notify-close{color:#fff}#w2ui-notify>div.w2ui-notify-error .w2ui-notify-close:hover{background-color:#fcadad;color:rgba(255,0,0,.8)}button.w2ui-btn,input[type=button].w2ui-btn{position:relative;display:inline-block;border-radius:14px;margin:0 3px;padding:6px 12px;color:#666;font-size:12px;border:1px solid transparent;background-image:linear-gradient(#e8e8ee 0,#e8e8ee 100%);outline:0;box-shadow:0 1px 0 #fff;cursor:default;min-width:75px;line-height:110%;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none;-webkit-tap-highlight-color:transparent}button.w2ui-btn:hover,input[type=button].w2ui-btn:hover{text-decoration:none;background-image:linear-gradient(#ddd 0,#ddd 100%);color:#333}button.w2ui-btn.clicked,button.w2ui-btn:active,input[type=button].w2ui-btn.clicked,input[type=button].w2ui-btn:active{background-image:linear-gradient(#ccc 0,#ccc 100%);text-shadow:1px 1px 1px #eee}button.w2ui-btn:focus:before,input[type=button].w2ui-btn:focus:before{content:"";border:1px dashed #aaa;border-radius:15px;position:absolute;top:2px;bottom:2px;left:2px;right:2px;pointer-events:none}button.w2ui-btn-blue,input[type=button].w2ui-btn-blue{color:#fff;background-image:linear-gradient(#269df0 0,#269df0 100%);border:1px solid #269df0;text-shadow:0 0 1px #111}button.w2ui-btn-blue:hover,input[type=button].w2ui-btn-blue:hover{color:#fff;background-image:linear-gradient(#2391dd 0,#2391dd 100%);border:1px solid #2391dd;text-shadow:0 0 1px #111}button.w2ui-btn-blue.clicked,button.w2ui-btn-blue:active,input[type=button].w2ui-btn-blue.clicked,input[type=button].w2ui-btn-blue:active{color:#fff;background-image:linear-gradient(#1e83c9 0,#1e83c9 100%);border:1px solid #1268a6;text-shadow:0 0 1px #111}button.w2ui-btn-blue:focus:before,input[type=button].w2ui-btn-blue:focus:before{border:1px dashed #e8e8e8}button.w2ui-btn-green,input[type=button].w2ui-btn-green{color:#fff;background-image:linear-gradient(#52a452 0,#52a452 100%);border:1px solid #52a452;text-shadow:0 0 1px #111}button.w2ui-btn-green:hover,input[type=button].w2ui-btn-green:hover{color:#fff;background-image:linear-gradient(#3f8f3d 0,#3f8f3d 100%);border:1px solid #3f8f3d;text-shadow:0 0 1px #111}button.w2ui-btn-green.clicked,button.w2ui-btn-green:active,input[type=button].w2ui-btn-green.clicked,input[type=button].w2ui-btn-green:active{color:#fff;background-image:linear-gradient(#377d36 0,#377d36 100%);border:1px solid #555;text-shadow:0 0 1px #111}button.w2ui-btn-green:focus:before,input[type=button].w2ui-btn-green:focus:before{border:1px dashed #e8e8e8}button.w2ui-btn-orange,input[type=button].w2ui-btn-orange{color:#fff;background-image:linear-gradient(#fb8822 0,#fb8822 100%);border:1px solid #fb8822;text-shadow:0 0 1px #111}button.w2ui-btn-orange:hover,input[type=button].w2ui-btn-orange:hover{color:#fff;background-image:linear-gradient(#f1731f 0,#f1731f 100%);border:1px solid #f1731f;text-shadow:0 0 1px #111}button.w2ui-btn-orange.clicked,button.w2ui-btn-orange:active,input[type=button].w2ui-btn-orange.clicked,input[type=button].w2ui-btn-orange:active{color:#fff;border:1px solid #666;background-image:linear-gradient(#b98747 0,#b98747 100%);text-shadow:0 0 1px #111}button.w2ui-btn-orange:focus:before,input[type=button].w2ui-btn-orange:focus:before{border:1px dashed 
#f9f9f9}button.w2ui-btn-red,input[type=button].w2ui-btn-red{color:#fff;background-image:linear-gradient(#f9585a 0,#f9585a 100%);border:1px solid #f9585a;text-shadow:0 0 1px #111}button.w2ui-btn-red:hover,input[type=button].w2ui-btn-red:hover{color:#fff;background-image:linear-gradient(#de4446 0,#de4446 100%);border:1px solid #de4446;text-shadow:0 0 1px #111}button.w2ui-btn-red.clicked,button.w2ui-btn-red:active,input[type=button].w2ui-btn-red.clicked,input[type=button].w2ui-btn-red:active{color:#fff;border:1px solid #861c1e;background-image:linear-gradient(#9c2123 0,#9c2123 100%);text-shadow:0 0 1px #111}button.w2ui-btn-red:focus:before,input[type=button].w2ui-btn-red:focus:before{border:1px dashed #ddd}button.w2ui-btn-small,input[type=button].w2ui-btn-small{padding:5px;border-radius:4px;margin:0;min-width:0}button.w2ui-btn-small:focus:before,input[type=button].w2ui-btn-small:focus:before{border-radius:2px;top:2px;bottom:2px;left:2px;right:2px}button.w2ui-btn:disabled,input[type=button].w2ui-btn:disabled{border:1px solid #e6e6e6;background:#f7f7f7;color:#bdbcbc;text-shadow:none}.w2ui-overlay{--tip-size:8px;position:fixed;z-index:1700;opacity:0;transition:opacity .1s;border-radius:4px}.w2ui-overlay *{box-sizing:border-box}.w2ui-overlay .w2ui-overlay-body{display:inline-block;border:1px solid #474747;border-radius:4px;padding:4px 8px;margin:0;font-size:12px;font-family:OpenSans;color:#fff;text-shadow:0 1px 1px #4a4a4a;background-color:#777;line-height:1.4;letter-spacing:.1px;overflow:auto}.w2ui-overlay .w2ui-overlay-body.w2ui-light{color:#3c3c3c;text-shadow:none;background-color:#fffde9;border:1px solid #d2d2d2;box-shadow:0 1px 1px 1px #fff}.w2ui-overlay .w2ui-overlay-body.w2ui-white{color:#3c3c3c;text-shadow:none;background-color:#fafafa;border:1px solid #cccace;box-shadow:0 0 1px 1px #fff}.w2ui-overlay .w2ui-overlay-body.w2ui-arrow-right:before{content:"";position:absolute;left:calc(var(--tip-size,8px) * -.5 - 1px);top:calc(50% - 1px);transform:rotate(-45deg) translateY(-50%);transform-origin:top center;margin:0;border:inherit;border-color:inherit;background-color:inherit;width:var(--tip-size,8px);height:var(--tip-size,8px);border-bottom-right-radius:200px;border-bottom-width:0;border-right-width:0}.w2ui-overlay .w2ui-overlay-body.w2ui-arrow-left:after{content:"";position:absolute;right:calc(var(--tip-size,8px) * -.5 - 1px);top:calc(50% - 1px);transform:rotate(135deg) translateY(-50%);transform-origin:top center;margin:0;border:inherit;border-color:inherit;background-color:inherit;width:var(--tip-size,8px);height:var(--tip-size,8px);border-bottom-right-radius:200px;border-bottom-width:0;border-right-width:0}.w2ui-overlay .w2ui-overlay-body.w2ui-arrow-top:before{content:"";position:absolute;bottom:calc(var(--tip-size,8px) * -.5 + 3px);left:50%;transform-origin:center left;transform:rotate(-135deg) translateX(-50%);margin:0;border:inherit;border-color:inherit;background-color:inherit;width:var(--tip-size,8px);height:var(--tip-size,8px);border-bottom-right-radius:200px;border-bottom-width:0;border-right-width:0}.w2ui-overlay .w2ui-overlay-body.w2ui-arrow-bottom:after{content:"";position:absolute;top:calc(var(--tip-size,8px) * -.5);left:50%;transform:rotate(45deg) translateX(-50%);transform-origin:center 
left;margin:0;border:inherit;border-color:inherit;background-color:inherit;width:var(--tip-size,8px);height:var(--tip-size,8px);border-bottom-right-radius:200px;border-bottom-width:0;border-right-width:0}.w2ui-colors{padding:8px;padding-bottom:0;background-color:#fff;border-radius:3px;overflow:hidden;width:270px;height:240px}.w2ui-colors *{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;-ms-box-sizing:border-box;-o-box-sizing:border-box;box-sizing:border-box}.w2ui-colors .w2ui-color-tabs{display:flex;background-color:#f7f7f7;height:34px;margin:14px -8px 0 -8px;border-top:1px solid #d6d6d6}.w2ui-colors .w2ui-color-tabs .w2ui-color-tab{display:inline-block;width:65px;height:32px;border:0;border-top:2px solid transparent;border-radius:1px;margin:-1.5px 4px;text-align:center;font-size:15px;padding-top:4px;color:#7b7b7b}.w2ui-colors .w2ui-color-tabs .w2ui-color-tab:hover{background-color:#e1e1e1}.w2ui-colors .w2ui-color-tabs .w2ui-color-tab.w2ui-selected{border-top-color:#0175ff}.w2ui-colors .w2ui-color-tabs .w2ui-color-tab .w2ui-icon{padding-top:1px;width:30px}.w2ui-colors .w2ui-tab-content.tab-1 .w2ui-color-row{display:flex}.w2ui-colors .w2ui-tab-content.tab-1 .w2ui-color-row .w2ui-color{cursor:default;text-align:center;display:inline-block;width:18px;height:18px;padding:6px;margin:1.5px;border:1px solid transparent}.w2ui-colors .w2ui-tab-content.tab-1 .w2ui-color-row .w2ui-color:hover{outline:1px solid #666;border:1px solid #fff}.w2ui-colors .w2ui-tab-content.tab-1 .w2ui-color-row .w2ui-color.w2ui-no-color{border:1px solid #efefef;background:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAABlBMVEX/////TgCFoIUYAAAAGUlEQVR42uXHIQEAAACDsNO/NJ4Kn9uC8wsJkAARUrXAjwAAAABJRU5ErkJggg==) 15px 15px}.w2ui-colors .w2ui-tab-content.tab-1 .w2ui-color-row .w2ui-color.w2ui-selected:before{content:'\2022';position:relative;left:-1px;top:-8px;color:#fff;font-size:14px;text-shadow:0 0 2px #222}.w2ui-colors .w2ui-tab-content.tab-2{height:184px;padding:1px 2px}.w2ui-colors .w2ui-tab-content.tab-2 .palette{position:relative;width:150px;height:125px;outline:1px solid #d2d2d2}.w2ui-colors .w2ui-tab-content.tab-2 .palette .palette-bg{height:100%;background-image:linear-gradient(0deg,#000,rgba(204,154,129,0));pointer-events:none}.w2ui-colors .w2ui-tab-content.tab-2 .rainbow{position:relative;width:150px;height:12px;margin:10px 0 0 0;background:linear-gradient(90deg,red 0,#ff0 17%,#0f0 33%,#0ff 50%,#00f 67%,#f0f 83%,red 100%)}.w2ui-colors .w2ui-tab-content.tab-2 .alpha{position:relative;width:150px;height:12px;margin:20px 0 0 0;background-color:#fff;background-image:linear-gradient(45deg,#bbb 25%,transparent 25%,transparent 75%,#bbb 75%,#bbb),linear-gradient(45deg,#bbb 25%,transparent 25%,transparent 75%,#bbb 75%,#bbb);background-size:12px 12px;background-position:0 0,6px 6px}.w2ui-colors .w2ui-tab-content.tab-2 .alpha .alpha-bg{height:100%;background-image:linear-gradient(90deg,rgba(80,80,80,0) 0,#505050 100%);pointer-events:none}.w2ui-colors .w2ui-tab-content.tab-2 .value1{pointer-events:none;position:absolute;top:0;display:inline-block;width:8px;height:8px;border-radius:10px;border:1px solid #999;outline:1px solid #bbb;background-color:transparent;box-shadow:0 0 1px #fff;transform:translateX(-3px) translateY(-3px)}.w2ui-colors .w2ui-tab-content.tab-2 .value2{pointer-events:none;position:absolute;top:-2px;display:inline-block;width:8px;height:16px;border-radius:2px;border:1px solid #696969;background-color:#fff;box-shadow:0 0 1px #fff;transform:translateX(-1px)}.w2ui-colors 
.w2ui-tab-content.tab-2 .color-info{float:right;margin-right:-5px}.w2ui-colors .w2ui-tab-content.tab-2 .color-info .color-preview-bg{box-shadow:0 0 1px #c3c3c3;height:40px;background-color:#fff;background-image:linear-gradient(45deg,#bbb 25%,transparent 25%,transparent 75%,#bbb 75%,#bbb),linear-gradient(45deg,#bbb 25%,transparent 25%,transparent 75%,#bbb 75%,#bbb);background-size:16px 16px;background-position:0 0,8px 8px;margin-bottom:10px}.w2ui-colors .w2ui-tab-content.tab-2 .color-info .color-original,.w2ui-colors .w2ui-tab-content.tab-2 .color-info .color-preview{height:40px;width:50px;float:left}.w2ui-colors .w2ui-tab-content.tab-2 .color-info .color-part{padding-top:7px}.w2ui-colors .w2ui-tab-content.tab-2 .color-info .color-part span{display:inline-block;width:8px;margin:2px 1px 2px 5px;color:#666}.w2ui-colors .w2ui-tab-content.tab-2 .color-info .color-part input{font-size:12px;border-radius:2px;border:1px solid #ccc;width:30px;text-align:right;padding:4px;color:#333}.w2ui-colors .w2ui-tab-content.tab-2 .color-info .color-part.opacity{margin:11px 0 0 8px}.w2ui-colors .w2ui-tab-content.tab-2 .color-info .color-part.opacity span{width:42px}.w2ui-colors .w2ui-tab-content.tab-2 .color-info .color-part.opacity input{width:38px;text-align:center}.w2ui-menu-search,.w2ui-menu-top{position:sticky;top:0;background-color:#fff;border-bottom:1px dotted silver}.w2ui-menu-search{padding:6px 4px}.w2ui-menu-search .w2ui-icon{position:absolute;top:11px;left:6px;color:#90819c;font-size:14px}.w2ui-menu-search #menu-search{width:100%;padding:5px 5px 5px 25px}.w2ui-menu{display:block;color:#000;padding:5px 0;border-radius:5px;overflow-x:hidden;cursor:default}.w2ui-menu .w2ui-menu-item{display:flex;align-content:stretch;padding:8px 5px;user-select:none}.w2ui-menu .w2ui-menu-item.w2ui-even{color:inherit;background-color:#fff}.w2ui-menu .w2ui-menu-item.w2ui-odd{color:inherit;background-color:#fbfbfb}.w2ui-menu .w2ui-menu-item:hover{background-color:#f0f3ff}.w2ui-menu .w2ui-menu-item.w2ui-selected{background-color:#e1e7ff}.w2ui-menu .w2ui-menu-item.w2ui-disabled{opacity:.4;color:inherit;background-color:transparent}.w2ui-menu .w2ui-menu-item .menu-icon{flex:none;width:26px;height:16px;padding:0;margin:0}.w2ui-menu .w2ui-menu-item .menu-icon span{width:18px;font-size:14px;color:#8d99a7;display:inline-block;padding-top:1px}.w2ui-menu .w2ui-menu-item .menu-text{flex-grow:1;white-space:nowrap}.w2ui-menu .w2ui-menu-item .menu-extra{flex:none;min-width:10px}.w2ui-menu .w2ui-menu-item .menu-extra span{border:1px solid #f6fcf4;border-radius:20px;width:auto;height:18px;padding:2px 7px;margin:0 0 0 10px;background-color:#f2f8f0;color:#666;box-shadow:0 0 2px #474545;text-shadow:1px 1px 0 #fff}.w2ui-menu .w2ui-menu-item .menu-extra span.hotkey{border:none;border-radius:0;background-color:transparent;color:#888;box-shadow:none;text-shadow:none}.w2ui-menu .w2ui-menu-item .menu-extra span.remove{background-color:transparent;border-color:transparent;box-shadow:none;padding:0 5px;border-radius:3px;position:relative;margin-top:-3px;display:block;height:20px;width:20px;text-align:center;user-select:none}.w2ui-menu .w2ui-menu-item .menu-extra span.remove:hover{background-color:#f9e7e7;color:red}.w2ui-menu .w2ui-menu-item .menu-extra span.remove:active{background-color:#ffd1d1}.w2ui-menu .w2ui-menu-divider{padding:5px}.w2ui-menu .w2ui-menu-divider .line{border-top:1px dotted silver}.w2ui-menu .w2ui-menu-divider.has-text{height:26px;background-color:#fafafa;border-top:1px solid #f2f2f2;border-bottom:1px solid 
#f2f2f2;text-align:center}.w2ui-menu .w2ui-menu-divider.has-text .line{display:block;margin-top:7px}.w2ui-menu .w2ui-menu-divider.has-text .text{display:inline-block;position:relative;top:-10px;background-color:#fafafa;padding:0 7px;color:#a9a9a9}.w2ui-menu .w2ui-no-items{padding:5px 15px;text-align:center;color:gray}.w2ui-menu .w2ui-no-items .w2ui-spinner{position:relative;left:-2px;margin-bottom:-5px;width:18px;height:18px}.w2ui-menu .w2ui-sub-menu-box{background-color:#fafafd;border-top:1px solid #d6e2e6;border-bottom:1px solid #d6e2e6;padding:0 3px}.w2ui-menu .collapsed .menu-extra span,.w2ui-menu .expanded .menu-extra span{position:relative;border-color:transparent;background-color:transparent;box-shadow:none;padding:0 6px;border-radius:0;margin-left:5px}.w2ui-menu .collapsed .menu-extra span:after,.w2ui-menu .expanded .menu-extra span:after{content:"";position:absolute;border-left:5px solid grey;border-top:5px solid transparent;border-bottom:5px solid transparent;transform:rotateZ(-90deg);pointer-events:none;margin-left:-2px;margin-top:3px}.w2ui-menu .collapsed .menu-extra span:hover,.w2ui-menu .expanded .menu-extra span:hover{border-color:transparent;background-color:transparent}.w2ui-menu .collapsed .menu-extra span:after{transform:rotateZ(90deg)}.w2ui-calendar{margin:0;line-height:1.1;user-select:none}.w2ui-calendar.w2ui-overlay-body{border:1px solid #cccace;color:#3c3c3c;text-shadow:none;background-color:#fff;box-shadow:0 1px 6px 1px #ebeaec}.w2ui-calendar .w2ui-cal-title,.w2ui-calendar .w2ui-time-title{margin:0;padding:7px 2px;background-color:#fafafa;border-top:1px solid #fefefe;border-bottom:1px solid #ddd;color:#555;text-align:center;text-shadow:1px 1px 1px #eee;font-size:16px;cursor:pointer}.w2ui-calendar .w2ui-cal-title .arrow-down,.w2ui-calendar .w2ui-time-title .arrow-down{position:relative;top:-3px;left:5px;opacity:.6}.w2ui-calendar .w2ui-cal-next,.w2ui-calendar .w2ui-cal-previous{width:30px;height:30px;color:#666;border:1px solid transparent;border-radius:3px;padding:7px 5px;margin:-4px 1px 0 1px;cursor:default}.w2ui-calendar .w2ui-cal-next:hover,.w2ui-calendar .w2ui-cal-previous:hover{color:#000;border:1px solid #f5f5f5;background-color:#f9f7f7}.w2ui-calendar .w2ui-cal-next:active,.w2ui-calendar .w2ui-cal-previous:active{color:#000;background-color:#f2f1f4;border:1px solid #e6dbfb}.w2ui-calendar .w2ui-cal-next>div,.w2ui-calendar .w2ui-cal-previous>div{position:absolute;border-left:4px solid #888;border-top:4px solid #888;border-right:4px solid transparent;border-bottom:4px solid transparent;width:0;height:0;padding:0;margin:3px 0 0 0}.w2ui-calendar .w2ui-cal-previous{float:left}.w2ui-calendar .w2ui-cal-previous>div{-webkit-transform:rotate(-45deg);-moz-transform:rotate(-45deg);-ms-transform:rotate(-45deg);-o-transform:rotate(-45deg);transform:rotate(-45deg);margin-left:6px}.w2ui-calendar .w2ui-cal-next{float:right}.w2ui-calendar .w2ui-cal-next>div{-webkit-transform:rotate(135deg);-moz-transform:rotate(135deg);-ms-transform:rotate(135deg);-o-transform:rotate(135deg);transform:rotate(135deg);margin-left:2px;margin-right:2px}.w2ui-calendar .w2ui-cal-jump{display:flex;background-color:#fdfdfd}.w2ui-calendar .w2ui-cal-jump .w2ui-jump-month,.w2ui-calendar .w2ui-cal-jump .w2ui-jump-year{cursor:default;text-align:center;border:1px solid transparent;border-radius:3px;font-size:14px}.w2ui-calendar .w2ui-cal-jump #w2ui-jump-month{width:186px;padding:10px 5px 4px 3px;border-right:1px solid 
#efefef;display:grid;grid-template-columns:repeat(3,1fr);grid-template-rows:repeat(4,52px);grid-gap:4px}.w2ui-calendar .w2ui-cal-jump #w2ui-jump-month .w2ui-jump-month{padding:15px 0 0 0}.w2ui-calendar .w2ui-cal-jump #w2ui-jump-year{width:90px;height:240px;overflow-x:hidden;overflow-y:auto;margin:0 2px;display:flex;flex-wrap:wrap}.w2ui-calendar .w2ui-cal-jump #w2ui-jump-year .w2ui-jump-year{width:95%;height:30px;padding:5px 0;margin:1px 0}.w2ui-calendar .w2ui-cal-jump .w2ui-jump-month:hover,.w2ui-calendar .w2ui-cal-jump .w2ui-jump-year:hover{color:#000;border:1px solid #f5f5f5;background-color:#f9f7f7}.w2ui-calendar .w2ui-cal-jump .w2ui-jump-month.w2ui-selected,.w2ui-calendar .w2ui-cal-jump .w2ui-jump-year.w2ui-selected{color:#000;background-color:#f2f1f4;border:1px solid #e6dbfb}.w2ui-calendar .w2ui-cal-now{cursor:default;padding:3px;text-align:center;background-color:#f4f4f4;margin:5px;border:1px solid #e5e5e5;border-radius:4px}.w2ui-calendar .w2ui-cal-now:hover{color:#28759e;border:1px solid #c3d6df}.w2ui-calendar .w2ui-cal-days{width:280px;height:240px;padding:2px;display:grid;grid-template-columns:repeat(7,1fr)}.w2ui-calendar .w2ui-cal-days .w2ui-day{border:1px solid #fff;border-radius:3px;color:#000;background-color:#f7f7f7;padding:8px 0 0 0;cursor:default;text-align:center}.w2ui-calendar .w2ui-cal-days .w2ui-day.w2ui-saturday,.w2ui-calendar .w2ui-cal-days .w2ui-day.w2ui-sunday{border:1px solid #fff;color:#c8493b;background-color:#f7f7f7}.w2ui-calendar .w2ui-cal-days .w2ui-day.w2ui-today{background-color:#e2f7cd}.w2ui-calendar .w2ui-cal-days .w2ui-day:hover{background-color:#f2f1f4;border:1px solid #e6dbfb}.w2ui-calendar .w2ui-cal-days .w2ui-day:active{background-color:#eeebf3;border:1px solid #cec2e5}.w2ui-calendar .w2ui-cal-days .w2ui-day.w2ui-selected{border:1px solid #8cb067}.w2ui-calendar .w2ui-cal-days .w2ui-day.w2ui-weekday{text-align:center;background-color:#fff;color:#a99cc2}.w2ui-calendar .w2ui-cal-days .w2ui-day.w2ui-weekday:hover{border:1px solid #fff;background-color:#fff}.w2ui-calendar .w2ui-cal-days .w2ui-day.outside{color:#b5b5b5;background-color:#fff}.w2ui-calendar .w2ui-cal-days .w2ui-day.w2ui-blocked{color:#555;background-color:#fff;border:1px solid #fff}.w2ui-calendar .w2ui-cal-days .w2ui-day.w2ui-blocked:after{content:" ";position:absolute;color:#b3b3b378;font-size:27px;padding:0;font-family:verdana;transform:translate(-15px,15px) rotate(-36deg);border-top:1px solid #c9c2c2;width:24px;transform-origin:top left}.w2ui-cal-time{display:grid;grid-template-columns:repeat(3,1fr);background-color:#fff;cursor:default}.w2ui-cal-time .w2ui-cal-column{width:90px;display:flex;flex-wrap:wrap;padding:4px}.w2ui-cal-time .w2ui-cal-column:nth-child(even){background-color:#fafafa}.w2ui-cal-time .w2ui-cal-column span{width:100%;padding:8px;margin:1px;text-align:center;border:1px solid transparent;border-radius:2px;white-space:nowrap}.w2ui-cal-time .w2ui-cal-column span:hover{background-color:#f2f1f4;border:1px solid #e6dbfb}.w2ui-cal-time .w2ui-cal-column span:active{background-color:#eeebf3;border:1px solid #cec2e5}.w2ui-cal-time .w2ui-cal-column span.w2ui-blocked{pointer-events:none;text-decoration:line-through;color:silver}.w2ui-form{position:relative;color:#000;background-color:#fcfcfb;border:1px solid #e1e1e1;border-radius:3px;padding:0;overflow:hidden}.w2ui-form>div{position:absolute;overflow:hidden}.w2ui-form 
.w2ui-form-header{position:absolute;top:0;left:0;right:0;height:36px;padding:10px;overflow:hidden;font-size:16px;color:#444;background-color:#fff;border-top-left-radius:2px;border-top-right-radius:2px;border-bottom:1px solid #f1f1f1}.w2ui-form .w2ui-form-toolbar{position:absolute;left:0;right:0;margin:0;padding:2px;border-top-left-radius:3px;border-top-right-radius:3px;border-bottom:1px solid #f1f1f1}.w2ui-form .w2ui-form-tabs{position:absolute;left:0;right:0;margin:0;padding:0;height:32px;border-top-left-radius:3px;border-top-right-radius:3px;padding-top:4px;background-color:#fff}.w2ui-form .w2ui-form-tabs .w2ui-tab.active{background-color:#fcfcfb}.w2ui-form .w2ui-page{position:absolute;left:0;right:0;overflow:auto;padding:10px 5px 0 5px;border-left:1px solid inherit;border-right:1px solid inherit;background-color:inherit;border-radius:3px}.w2ui-form .w2ui-column-container{display:flex;padding:0}.w2ui-form .w2ui-column-container .w2ui-column{width:100%}.w2ui-form .w2ui-column-container .w2ui-column.col-0,.w2ui-form .w2ui-column-container .w2ui-column.col-1,.w2ui-form .w2ui-column-container .w2ui-column.col-10,.w2ui-form .w2ui-column-container .w2ui-column.col-2,.w2ui-form .w2ui-column-container .w2ui-column.col-3,.w2ui-form .w2ui-column-container .w2ui-column.col-4,.w2ui-form .w2ui-column-container .w2ui-column.col-5,.w2ui-form .w2ui-column-container .w2ui-column.col-6,.w2ui-form .w2ui-column-container .w2ui-column.col-7,.w2ui-form .w2ui-column-container .w2ui-column.col-8,.w2ui-form .w2ui-column-container .w2ui-column.col-9{padding:0;padding-left:10px}.w2ui-form .w2ui-column-container .w2ui-column.col-0{padding-left:0}.w2ui-form .w2ui-buttons{position:absolute;left:0;right:0;bottom:0;text-align:center;border-top:1px solid #f1f1f1;border-bottom:0 solid #f1f1f1;background-color:#fff;padding:15px 0;border-bottom-left-radius:3px;border-bottom-right-radius:3px}.w2ui-form .w2ui-buttons button,.w2ui-form .w2ui-buttons input[type=button]{min-width:80px;margin-right:5px}.w2ui-form input[type=checkbox]:not(.w2ui-toggle),.w2ui-form input[type=radio]{margin-top:4px;margin-bottom:4px;width:14px;height:14px}.w2ui-group-title{padding:5px 2px 0 5px;color:#656164cc;text-shadow:1px 1px 2px #fdfdfd;font-size:120%}.w2ui-group-fields{background-color:#fff;margin:5px 0 14px 0;padding:10px 5px;border-top:1px dotted #e1e1e1;border-bottom:1px dotted #e1e1e1}.w2ui-field>label{display:block;float:left;margin-top:10px;margin-bottom:0;width:120px;padding:0;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;text-align:right;min-height:20px;color:#666}.w2ui-field>div{margin-bottom:3px;margin-left:128px;padding:4px;min-height:28px;float:none}.w2ui-field.w2ui-required>div{position:relative}.w2ui-field.w2ui-required:not(.w2ui-field-inline)>div::before{content:'*';position:absolute;margin-top:7px;margin-left:-8px;color:red}.w2ui-field.w2ui-required.w2ui-field-inline>div::before{content:''!important}.w2ui-field.w2ui-disabled{opacity:.45;background-color:transparent!important}.w2ui-field.w2ui-disabled input:not([type=button]):not([type=submit]):not([type=checkbox]):not([type=radio]),.w2ui-field.w2ui-disabled select,.w2ui-field.w2ui-disabled textarea{border:1px solid #bdc0c3!important;background-color:#f5f5f5!important}.w2ui-field.w2ui-span-none>label{margin:0;padding:5px 12px 0 
4px;display:block;width:98%;text-align:left}.w2ui-field.w2ui-span-none>div{margin-left:0}.w2ui-field.w2ui-span0>label{display:none}.w2ui-field.w2ui-span0>div{margin-left:0}.w2ui-field.w2ui-span1>label{width:20px}.w2ui-field.w2ui-span1>div{margin-left:28px}.w2ui-field.w2ui-span2>label{width:40px}.w2ui-field.w2ui-span2>div{margin-left:48px}.w2ui-field.w2ui-span3>label{width:60px}.w2ui-field.w2ui-span3>div{margin-left:68px}.w2ui-field.w2ui-span4>label{width:80px}.w2ui-field.w2ui-span4>div{margin-left:88px}.w2ui-field.w2ui-span5>label{width:100px}.w2ui-field.w2ui-span5>div{margin-left:108px}.w2ui-field.w2ui-span6>label{width:120px}.w2ui-field.w2ui-span6>div{margin-left:128px}.w2ui-field.w2ui-span7>label{width:140px}.w2ui-field.w2ui-span7>div{margin-left:148px}.w2ui-field.w2ui-span8>label{width:160px}.w2ui-field.w2ui-span8>div{margin-left:168px}.w2ui-field.w2ui-span9>label{width:180px}.w2ui-field.w2ui-span9>div{margin-left:188px}.w2ui-field.w2ui-span10>label{width:200px}.w2ui-field.w2ui-span10>div{margin-left:208px}.w2ui-field.w2ui-field-inline{display:inline}.w2ui-field.w2ui-field-inline>div{display:inline;margin:0;padding:0}.w2ui-field .w2ui-box-label{user-select:none;vertical-align:middle}.w2ui-field .w2ui-box-label input,.w2ui-field .w2ui-box-label span{display:inline-block;vertical-align:middle}.w2ui-field .w2ui-box-label span{padding-left:3px}.w2ui-field .w2ui-box-label input{margin:4px 0 3px 0}input:not([type=button]):not([type=submit]):not([type=checkbox]):not([type=radio]).w2ui-error,textarea.w2ui-error{border:1px solid #ffa8a8;background-color:#fff4eb}.w2field{padding:3px;border-radius:3px;border:1px solid silver}.w2ui-field-helper{position:absolute;display:inline-block;line-height:100%;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none}.w2ui-field-helper .w2ui-field-up{position:absolute;top:0;padding:2px 3px;cursor:pointer;pointer-events:all}.w2ui-field-helper .w2ui-field-down{position:absolute;bottom:0;padding:2px 3px;cursor:pointer;pointer-events:all}.w2ui-field-helper .arrow-up:hover{border-bottom-color:#81c6ff}.w2ui-field-helper .arrow-down:hover{border-top-color:#81c6ff}.w2ui-field-helper .w2ui-icon-search{position:absolute;margin:8px 0 0 -2px;display:none;color:#777;width:21px!important;font-size:13px}.w2ui-field-helper .w2ui-icon-search.show-search{display:block}.w2ui-field-helper.w2ui-list{color:inherit;position:absolute;padding:0;margin:0;min-height:28px;overflow:auto;border:1px solid #e0e0e0;border-radius:3px;font-size:6px;line-height:100%;box-sizing:border-box;pointer-events:all;background-color:#f7fafa}.w2ui-field-helper.w2ui-list.has-focus,.w2ui-field-helper.w2ui-list:focus-within{outline:auto #72b2ff;background-color:#fff}.w2ui-field-helper.w2ui-list input[type=text]{-webkit-box-shadow:none;-moz-box-shadow:none;-ms-box-shadow:none;-o-box-shadow:none;box-shadow:none}.w2ui-field-helper.w2ui-list .w2ui-multi-items{position:absolute;display:inline-block;margin:0;padding:0;pointer-events:none}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-item{pointer-events:all;float:left;margin:3px 0 0 5px;border-radius:15px;width:auto;padding:3px 24px 1px 12px;border:1px solid #b4d0de;background-color:#eff3f5;white-space:nowrap;cursor:default;font-family:OpenSans;font-size:11px;line-height:100%;height:20px;overflow:hidden;text-overflow:ellipsis;box-sizing:border-box}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-item:hover{background-color:#d0dbe1}.w2ui-field-helper.w2ui-list .w2ui-multi-items 
.li-item:last-child{border-radius:0;border:1px solid transparent;background-color:transparent}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-item:last-child input{padding:1px;padding-top:0;margin:0;border:0;outline:0;height:auto;line-height:100%;font-size:inherit;font-family:inherit;background-color:transparent}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-item .w2ui-icon{float:left;color:#828aa7;margin:1px 2px 0 -6px}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-item .w2ui-list-remove{float:right;width:16px;height:16px;margin:-2px -20px 0 0;border-radius:2px;font-size:12px;border:1px solid transparent}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-item .w2ui-list-remove:hover{background-color:#f6e5e5;border:1px solid #fac2c2;color:red;opacity:1}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-item .w2ui-list-remove:before{position:relative;display:inline-block;left:4px;opacity:.7;content:'x';line-height:1}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-item>span.file-size{pointer-events:none;color:#777}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-search{float:left;margin:0;padding:0}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-search input[type=text]{pointer-events:all;width:0;height:20px;padding:3px 0 3px 0;margin:3px 0 0 5px;border:0;background-color:transparent}.w2ui-field-helper.w2ui-list .w2ui-multi-items .li-search input[type=text]:focus{outline:0;border:0}.w2ui-field-helper.w2ui-list .w2ui-multi-file{position:absolute;left:0;right:0;top:0;bottom:0}.w2ui-field-helper.w2ui-list.w2ui-readonly .w2ui-multi-items>.li-item:hover{background-color:#eff3f5}.w2ui-field-helper.w2ui-list.w2ui-file-dragover{background-color:#e4ffda;border:1px solid #93e07d}.w2ui-field-helper.w2ui-list .w2ui-enum-placeholder{display:inline;position:absolute;pointer-events:none;color:#999;box-sizing:border-box}.w2ui-overlay .w2ui-file-preview{padding:1px;background-color:#fff}.w2ui-overlay .w2ui-file-info{display:grid;grid-template-columns:1fr 2fr;color:#fff;padding:6px 0}.w2ui-overlay .w2ui-file-info .file-caption{text-align:right;color:silver;padding-right:10px}.w2ui-overlay .w2ui-file-info .file-value{color:#fff}.w2ui-overlay .w2ui-file-info .file-type{max-width:200px;display:block-inline;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.arrow-up{background:0 0;width:0;height:0;border-left:4px solid transparent;border-right:4px solid transparent;border-bottom:5px solid #777;font-size:0;line-height:0}.arrow-down{background:0 0;width:0;height:0;border-left:4px solid transparent;border-right:4px solid transparent;border-top:5px solid #777;font-size:0;line-height:0}.arrow-left{background:0 0;width:0;height:0;border-bottom:4px solid transparent;border-top:4px solid transparent;border-right:5px solid #777;font-size:0;line-height:0}.arrow-right{background:0 0;width:0;height:0;border-bottom:4px solid transparent;border-top:4px solid transparent;border-left:5px solid #777;font-size:0;line-height:0}.w2ui-select{cursor:default;color:#000!important;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAALCAQAAACnzwd+AAAAcklEQVR4AcXMsQFBQQDG4P9tAgC0gJYRQJZgKQMwCqCku6vVAAAA+NJHP4KHOk0aV2pRw61n4BBmyOxKQ8I4ehZeuhd3HTx6DQEGZ7sBfr2OOOOj3Yi43kMKs9sZknofOexqZ8npMygwWZTX51CipP+YA1OiZJbYYg9lAAAAAElFTkSuQmCC);background-size:17px 6px;background-position:right center;background-repeat:no-repeat}.w2ui-select.has-focus{outline:auto 
#72b2ff;background-color:#fff!important}.w2ui-select[disabled],.w2ui-select[readonly]{background-image:none;background-color:#f1f1f1!important;color:#777!important}.w2ui-layout{overflow:hidden;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;-ms-box-sizing:border-box;-o-box-sizing:border-box;box-sizing:border-box}.w2ui-layout *{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;-ms-box-sizing:border-box;-o-box-sizing:border-box;box-sizing:border-box}.w2ui-layout>div{position:absolute;overflow:hidden;border:0;margin:0;padding:0;outline:0;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;-ms-box-sizing:border-box;-o-box-sizing:border-box;box-sizing:border-box}.w2ui-layout>div .w2ui-panel{display:none;position:absolute;z-index:120}.w2ui-layout>div .w2ui-panel .w2ui-panel-title{position:absolute;left:0;top:0;right:0;padding:5px;background-color:#fff;color:#656164cc;border:1px solid #efefef;border-bottom:1px solid #f5f5f5}.w2ui-layout>div .w2ui-panel .w2ui-panel-tabs{position:absolute;left:0;top:0;right:0;z-index:2;display:none;overflow:hidden;background-color:#fff;padding:0}.w2ui-layout>div .w2ui-panel .w2ui-panel-tabs>.w2ui-tab.active{background-color:#fcfcfc}.w2ui-layout>div .w2ui-panel .w2ui-panel-toolbar{position:absolute;left:0;top:0;right:0;z-index:2;display:none;overflow:hidden;background-color:#fafafa;border-bottom:1px solid #efefef;padding:2px}.w2ui-layout>div .w2ui-panel .w2ui-panel-content{position:absolute;left:0;top:0;right:0;bottom:0;z-index:1;color:inherit;background-color:#fcfcfc}.w2ui-layout>div .w2ui-resizer{display:none;position:absolute;z-index:121;background-color:transparent}.w2ui-layout>div .w2ui-resizer.active,.w2ui-layout>div .w2ui-resizer:hover{background-color:#c8cad1}.w2ui-grid{position:relative;border:1px solid #e1e1e1;border-radius:2px;overflow:hidden!important}.w2ui-grid>div{position:absolute;overflow:hidden}.w2ui-grid .w2ui-grid-header{position:absolute;top:0;left:0;right:0;height:36px;padding:10px;overflow:hidden;font-size:16px;color:#444;background-color:#fff;border-top-left-radius:2px;border-top-right-radius:2px;border-bottom:1px solid #e1e1e1!important}.w2ui-grid .w2ui-grid-toolbar{position:absolute;border-bottom:1px solid #efefef;background-color:#fafafa;height:52px;padding:9px 3px 0 3px;margin:0;box-shadow:0 1px 2px #f5f5f5}.w2ui-grid .w2ui-grid-toolbar .w2ui-tb-button .w2ui-tb-icon{margin:3px 0 0 0!important}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input{position:relative;width:300px;left:0;top:-4px}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-search-down{position:absolute;top:7px;left:4px;color:#8c99a7;font-size:13px}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-grid-search-name{position:absolute;margin:5px 0 0 3px;padding:4px 27px 4px 10px;background-color:#fbfbfb;border:1px solid #b9b9b9;border-radius:15px;pointer-events:none}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-grid-search-name .name-icon{position:absolute;margin-left:-6px;color:#8c99a7}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-grid-search-name .name-text{padding-left:14px}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-grid-search-name .name-cross{position:absolute;margin-top:-4px;margin-left:7px;padding:4px 5px;pointer-events:all}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-grid-search-name .name-cross:hover{color:red}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input 
.w2ui-search-all{outline:0!important;border-radius:4px!important;line-height:normal!important;height:30px!important;width:300px!important;border:1px solid #e1e1e1!important;color:#000!important;background-color:#f1f1f1!important;padding:1px 28px 0 28px!important;margin:0!important;margin-top:1px!important;font-size:13px!important}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-search-all:focus{border:1px solid #007cff!important;background-color:#fff!important}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-search-drop{position:absolute;right:2px;top:3px;height:26px;width:26px;font-size:16px;color:#a4adb1;cursor:pointer;padding:7px 2px 7px 2px;border-radius:4px;background-color:transparent}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-search-drop span.w2ui-icon-drop{position:relative;top:-2px}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-search-drop.checked,.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-search-input .w2ui-search-drop:hover{color:#fff;background-color:#56a1e2}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches{display:flex;flex-direction:row;flex-wrap:nowrap;border-top:1px solid #ececec;border-bottom:1px solid #ececec;background-color:#fcfdff;margin:7px -20px 0 -20px;padding:6px 50px 6px 20px;height:36px}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches>div{white-space:nowrap}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches>span{white-space:nowrap;text-overflow:ellipsis;overflow:hidden;border:1px solid #88c3f7;border-radius:15px;padding:4px 12px;margin:0 4px;color:#4c9ad6;font-size:12px;font-weight:700;background-color:#f5f9fe}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches>span>span{font-size:9px;position:relative;top:-1px;left:2px}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches .grid-search-line{border-left:1px solid #ececec;width:11px;height:22px;margin-left:7px;margin-top:1px}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches .w2ui-grid-search-logic{border:1px solid #c8c9ca!important;color:#676767!important}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches button.grid-search-btn{margin:0 3px;padding:0;height:24px;font-size:11px}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches button.grid-search-btn.btn-remove{min-width:26px;position:absolute;left:calc(100% - 35px)}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches .grid-search-count{background-color:#4cb1fd;border-radius:10px;color:#fff;padding:0 6px 1px 6px;font-size:11px!important;position:relative!important;top:0!important}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches .grid-search-list li{padding:5px}.w2ui-grid .w2ui-grid-toolbar .w2ui-grid-searches .grid-search-list input{position:relative;top:2px;left:-3px}.w2ui-grid .w2ui-grid-save-search{padding-top:30px;text-align:center}.w2ui-grid .w2ui-grid-save-search span{width:280px;display:inline-block;text-align:left;padding-bottom:4px}.w2ui-grid .w2ui-grid-save-search .search-name{width:280px!important}.w2ui-grid .w2ui-grid-body{position:absolute;overflow:hidden;padding:0;background-color:#fff;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none}.w2ui-grid .w2ui-grid-body input,.w2ui-grid .w2ui-grid-body select,.w2ui-grid .w2ui-grid-body textarea{-webkit-user-select:text;-moz-user-select:text;-ms-user-select:text;-o-user-select:text;user-select:text}.w2ui-grid .w2ui-grid-body .w2ui-grid-columns,.w2ui-grid .w2ui-grid-body .w2ui-grid-fcolumns{overflow:hidden;position:absolute;left:0;top:0;right:0;box-shadow:0 1px 4px #efefef;height:auto}.w2ui-grid .w2ui-grid-body 
.w2ui-grid-columns table,.w2ui-grid .w2ui-grid-body .w2ui-grid-fcolumns table{height:auto}.w2ui-grid .w2ui-grid-body .w2ui-grid-columns .w2ui-resizer,.w2ui-grid .w2ui-grid-body .w2ui-grid-fcolumns .w2ui-resizer{position:absolute;z-index:1000;display:block;background-image:none;background-color:rgba(0,0,0,0);padding:0;margin:0;width:6px;height:12px;cursor:ew-resize}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords,.w2ui-grid .w2ui-grid-body .w2ui-grid-records{position:absolute;left:0;right:0;top:0;bottom:0}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-odd,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-odd{color:inherit;background-color:#fff;border-bottom:1px solid #f5f5f5}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-odd.w2ui-record-hover,.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-odd:hover,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-odd.w2ui-record-hover,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-odd:hover{color:inherit;background-color:#f3f3f3}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-odd.w2ui-empty-record:hover,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-odd.w2ui-empty-record:hover{background-color:#fff}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-even,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-even{color:inherit;background-color:#fbfbfb;border-bottom:1px dotted #f5f5f5}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-even.w2ui-record-hover,.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-even:hover,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-even.w2ui-record-hover,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-even:hover{color:inherit;background-color:#f3f3f3}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-even.w2ui-empty-record:hover,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-even.w2ui-empty-record:hover{background-color:#fbfbfb}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr td.w2ui-selected,.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-selected,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr td.w2ui-selected,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-selected{color:#000!important;background-color:#d9eaff!important;border-bottom:1px solid transparent}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr td.w2ui-inactive,.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-inactive,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr td.w2ui-inactive,.w2ui-grid .w2ui-grid-body .w2ui-grid-records table tr.w2ui-inactive{background-color:#e8edf5!important}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords .w2ui-expanded1,.w2ui-grid .w2ui-grid-body .w2ui-grid-records .w2ui-expanded1{height:0;border-bottom:1px solid #b2bac0}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords .w2ui-expanded1>div,.w2ui-grid .w2ui-grid-body .w2ui-grid-records .w2ui-expanded1>div{height:0;border:0;transition:height .3s,opacity .3s}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords .w2ui-expanded2,.w2ui-grid .w2ui-grid-body .w2ui-grid-records .w2ui-expanded2{height:0;border-radius:0;border-bottom:1px solid #b2bac0}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords .w2ui-expanded2>div,.w2ui-grid .w2ui-grid-body .w2ui-grid-records .w2ui-expanded2>div{height:0;border:0;transition:height .3s,opacity .3s}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords .w2ui-load-more,.w2ui-grid .w2ui-grid-body .w2ui-grid-records 
.w2ui-load-more{cursor:pointer;background-color:rgba(233,237,243,.5);border-right:1px solid #f1f1f1;height:43px}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords .w2ui-load-more>div,.w2ui-grid .w2ui-grid-body .w2ui-grid-records .w2ui-load-more>div{text-align:center;color:#777;background-color:rgba(233,237,243,.5);padding:10px 0 15px 0;height:43px;border-top:1px dashed #d6d5d7;border-bottom:1px dashed #d6d5d7;font-size:12px}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords .w2ui-load-more>div:hover,.w2ui-grid .w2ui-grid-body .w2ui-grid-records .w2ui-load-more>div:hover{color:#438ba2;background-color:#f3f3f3}.w2ui-grid .w2ui-grid-body .w2ui-grid-frecords .w2ui-reoder-empty,.w2ui-grid .w2ui-grid-body .w2ui-grid-records .w2ui-reoder-empty{background-color:#eee;border-bottom:1px dashed #aaa;border-top:1px dashed #aaa}.w2ui-grid .w2ui-grid-body table{border-spacing:0;border-collapse:collapse;table-layout:fixed;width:1px}.w2ui-grid .w2ui-grid-body table .w2ui-head{margin:0;padding:0;border-right:1px solid #dcdcdc;border-bottom:1px solid #dcdcdc;color:#656164;background-image:linear-gradient(#fff,#f9f9f9)}.w2ui-grid .w2ui-grid-body table .w2ui-head>div{padding:7px 6px;white-space:nowrap;text-overflow:ellipsis;overflow:hidden;position:relative}.w2ui-grid .w2ui-grid-body table .w2ui-head.w2ui-reorder-cols-head:hover{cursor:move}.w2ui-grid .w2ui-grid-body table td{border-right:1px solid #f1f1f1;border-bottom:0 solid #d6d5d7;cursor:default;overflow:hidden}.w2ui-grid .w2ui-grid-body table td.w2ui-soft-hidden,.w2ui-grid .w2ui-grid-body table td.w2ui-soft-span{border-right-color:transparent}.w2ui-grid .w2ui-grid-body table td.w2ui-grid-data{margin:0;padding:0}.w2ui-grid .w2ui-grid-body table td.w2ui-grid-data .w2ui-info{position:relative;top:2px;left:-1px;font-size:13px;color:#8d99a7;cursor:pointer;width:18px;display:inline-block;margin-right:3px;text-align:center}.w2ui-grid .w2ui-grid-body table td.w2ui-grid-data .w2ui-clipboard-copy{float:right;margin-top:-15px;width:20px;height:16px;padding:0;text-align:center;cursor:pointer;font-size:13px;color:#8d98a7}.w2ui-grid .w2ui-grid-body table td.w2ui-grid-data .w2ui-clipboard-copy:hover{color:#545961}.w2ui-grid .w2ui-grid-body table td.w2ui-grid-data>div{padding:5px;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.w2ui-grid .w2ui-grid-body table td.w2ui-grid-data>div.flexible-record{height:auto;overflow:visible;white-space:normal}.w2ui-grid .w2ui-grid-body table td.w2ui-grid-data .w2ui-show-children{width:16px;height:10px;display:inline-block;position:relative;top:-1px;cursor:pointer}.w2ui-grid .w2ui-grid-body table td:last-child{border-right:0}.w2ui-grid .w2ui-grid-body table td:last-child div{text-overflow:clip}.w2ui-grid .w2ui-grid-body table .w2ui-col-number{width:34px;color:#777;background-color:rgba(233,237,243,.5)}.w2ui-grid .w2ui-grid-body table .w2ui-col-number div{padding:0 7px 0 3px;text-align:right}.w2ui-grid .w2ui-grid-body table .w2ui-col-number.w2ui-head{cursor:pointer}.w2ui-grid .w2ui-grid-body table .w2ui-col-select{width:26px}.w2ui-grid .w2ui-grid-body table .w2ui-col-select div{padding:0 0;text-align:center;overflow:hidden}.w2ui-grid .w2ui-grid-body table .w2ui-col-select div input[type=checkbox]{margin-top:0;margin-bottom:0;position:relative}.w2ui-grid .w2ui-grid-body table .w2ui-col-expand{width:26px}.w2ui-grid .w2ui-grid-body table .w2ui-col-expand div{padding:0 0;text-align:center;font-weight:700}.w2ui-grid .w2ui-grid-body table .w2ui-col-order{width:26px}.w2ui-grid .w2ui-grid-body table .w2ui-col-order.w2ui-grid-data 
div{cursor:move;height:18px;background-image:url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciPgogIDxyZWN0IHN0eWxlPSJmaWxsOiAjYWFhOyIgeD0iMCIgeT0iNCIgaGVpZ2h0PSIzIiB3aWR0aD0iMTYiPjwvcmVjdD4KICA8cmVjdCBzdHlsZT0iZmlsbDogI2FhYTsiIHg9IjAiIHk9IjkiIGhlaWdodD0iMyIgd2lkdGg9IjE2Ij48L3JlY3Q+Cjwvc3ZnPg==);background-position:5px 2px;background-size:14px 12px;background-repeat:no-repeat}.w2ui-grid .w2ui-grid-body table .w2ui-col-selected{background-color:#d1d1d1!important}.w2ui-grid .w2ui-grid-body table .w2ui-row-selected{background-color:#e2e2e2!important}.w2ui-grid .w2ui-grid-body .w2ui-intersection-marker{position:absolute;top:0;left:0;margin-left:-5px;height:26px;width:10px}.w2ui-grid .w2ui-grid-body .w2ui-intersection-marker.left{left:0;margin-left:-5px}.w2ui-grid .w2ui-grid-body .w2ui-intersection-marker.right{right:0;margin-right:5px}.w2ui-grid .w2ui-grid-body .w2ui-intersection-marker .top-marker{position:absolute;top:0;height:0;width:0;border-top:5px solid #72b2ff;border-left:5px solid transparent;border-right:5px solid transparent}.w2ui-grid .w2ui-grid-body .w2ui-intersection-marker .bottom-marker{position:absolute;bottom:0;height:0;width:0;border-bottom:5px solid #72b2ff;border-left:5px solid transparent;border-right:5px solid transparent}.w2ui-grid .w2ui-grid-body div.w2ui-col-header{height:auto!important;width:100%;overflow:hidden;padding-right:10px!important}.w2ui-grid .w2ui-grid-body div.w2ui-col-header>div.w2ui-sort-up{border:4px solid transparent;border-bottom:5px solid #8d99a7;margin-top:-2px;margin-right:-7px;float:right}.w2ui-grid .w2ui-grid-body div.w2ui-col-header>div.w2ui-sort-down{border:4px solid transparent;border-top:5px solid #8d99a7;margin-top:2px;margin-right:-7px;float:right}.w2ui-grid .w2ui-grid-body .w2ui-col-group{text-align:center}.w2ui-grid .w2ui-grid-body .w2ui-grid-scroll1{position:absolute;left:0;bottom:0;border-top:1px solid #ddd;border-right:1px solid #ddd;background-color:#fafafa}.w2ui-grid .w2ui-grid-empty-msg{position:absolute;top:27px;left:0;right:0;bottom:0;background-color:rgba(255,255,255,.65)}.w2ui-grid .w2ui-grid-empty-msg>div{position:absolute;left:0;right:0;top:45%;transform:translateY(-45%);text-align:center;font-size:13px;color:#666}.w2ui-grid .w2ui-changed{background:url(data:image/gif;base64,R0lGODlhCgAKAJEAALAABf///wAAAAAAACH5BAEAAAIALAAAAAAKAAoAAAIPlI8Hy8mbxIsSUnup3rQAADs=) no-repeat top right}.w2ui-grid .w2ui-edit-box{position:absolute;z-index:1001;border:1.5px solid #6299da;pointer-events:auto;padding:2px!important;margin:0!important;background-color:#fff}.w2ui-grid .w2ui-edit-box .w2ui-editable div.w2ui-input{outline:0;padding:.5px 1.5px!important}.w2ui-grid .w2ui-edit-box .w2ui-editable input{top:-2px!important;padding:1.5px!important}.w2ui-grid .w2ui-editable{overflow:hidden;height:100%!important;margin:0!important;padding:3.5px 2px 2px 2px!important}.w2ui-grid .w2ui-editable input{position:relative;top:-1px;border:0!important;border-radius:0!important;border-color:transparent!important;padding:3px!important;display:inline-block;width:100%!important;height:100%!important;pointer-events:auto!important}.w2ui-grid .w2ui-editable div.w2ui-input{position:relative;top:-.5px;border:0 transparent;border-radius:0!important;margin:0!important;padding:5px 
3px!important;display:inline-block;width:100%!important;height:100%!important;pointer-events:auto!important;background-color:#fff;white-space:pre;overflow:hidden;-webkit-user-select:text;-moz-user-select:text;-ms-user-select:text;-o-user-select:text;user-select:text}.w2ui-grid .w2ui-editable input.w2ui-select{outline:0!important;background:#fff}.w2ui-grid .w2ui-grid-summary{position:absolute;border-top:1px solid #dcdcdc;box-shadow:0 -1px 4px #f0eeee}.w2ui-grid .w2ui-grid-summary table{color:inherit}.w2ui-grid .w2ui-grid-summary table .w2ui-odd{background-color:#fff}.w2ui-grid .w2ui-grid-summary table .w2ui-even{background-color:#fbfbfb}.w2ui-grid .w2ui-grid-footer{position:absolute;bottom:0;left:0;right:0;margin:0;padding:0;text-align:center;font-size:11px;height:24px;overflow:hidden;-webkit-user-select:text;-moz-user-select:text;-ms-user-select:text;-o-user-select:text;user-select:text;box-shadow:0 -1px 4px #f5f5f5;color:#444;background-color:#f8f8f8;border-top:1px solid #e4e4e4;border-bottom-left-radius:2px;border-bottom-right-radius:2px}.w2ui-grid .w2ui-grid-footer .w2ui-footer-left{float:left;padding-top:5px;padding-left:5px}.w2ui-grid .w2ui-grid-footer .w2ui-footer-right{float:right;padding-top:5px;padding-right:5px}.w2ui-grid .w2ui-grid-footer .w2ui-footer-center{padding:2px;text-align:center}.w2ui-grid .w2ui-grid-footer .w2ui-footer-center .w2ui-footer-nav{width:110px;margin:0 auto;padding:0;text-align:center}.w2ui-grid .w2ui-grid-footer .w2ui-footer-center .w2ui-footer-nav input[type=text]{padding:1px 2px 2px 2px;border-radius:3px;width:40px;text-align:center}.w2ui-grid .w2ui-grid-footer .w2ui-footer-center .w2ui-footer-nav a.w2ui-footer-btn{display:inline-block;border-radius:3px;cursor:pointer;font-size:11px;line-height:16px;padding:1px 5px;width:30px;height:18px;margin-top:-1px;color:#000;background-color:transparent}.w2ui-grid .w2ui-grid-footer .w2ui-footer-center .w2ui-footer-nav a.w2ui-footer-btn:hover{color:#000;background-color:#aec8ff}.w2ui-grid .w2ui-grid-focus-input{position:absolute;top:0;right:0;z-index:-1;opacity:0;overflow:hidden;padding:0;margin:0;width:1px;height:1px;resize:none;border:0}.w2ui-ss .w2ui-grid-body .w2ui-grid-records table tr td.w2ui-selected{background-color:#eef4fe!important}.w2ui-ss .w2ui-grid-body .w2ui-grid-records table tr td.w2ui-inactive{background-color:#f4f6f9!important}.w2ui-ss .w2ui-grid-body .w2ui-grid-records table td{border-right-width:1px;border-bottom:1px solid #efefef}.w2ui-ss .w2ui-grid-body .w2ui-grid-records table tr.w2ui-even,.w2ui-ss .w2ui-grid-body .w2ui-grid-records table tr.w2ui-even:hover,.w2ui-ss .w2ui-grid-body .w2ui-grid-records table tr.w2ui-odd,.w2ui-ss .w2ui-grid-body .w2ui-grid-records table tr.w2ui-odd:hover{background-color:inherit}.w2ui-ss .w2ui-grid-body .w2ui-grid-records table tr:first-child td{border-top:0;border-bottom:0}.w2ui-ss .w2ui-grid-body .w2ui-grid-frecords table tr td.w2ui-selected{background-color:#eef4fe!important}.w2ui-ss .w2ui-grid-body .w2ui-grid-frecords table tr td.w2ui-inactive{background-color:#f4f6f9!important}.w2ui-ss .w2ui-grid-body .w2ui-grid-frecords table td{border-right-width:1px;border-bottom:1px solid #efefef}.w2ui-ss .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-even,.w2ui-ss .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-even:hover,.w2ui-ss .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-odd,.w2ui-ss .w2ui-grid-body .w2ui-grid-frecords table tr.w2ui-odd:hover{background-color:inherit}.w2ui-ss .w2ui-grid-body .w2ui-grid-frecords table tr:first-child 
td{border-bottom:0}.w2ui-ss .w2ui-grid-body .w2ui-selection{position:absolute;z-index:1000;border:1.5px solid #6299da;pointer-events:none}.w2ui-ss .w2ui-grid-body .w2ui-selection .w2ui-selection-resizer{cursor:crosshair;position:absolute;bottom:0;right:0;width:6px;height:6px;margin-right:-3px;margin-bottom:-3px;background-color:#457fc2;border:.5px solid #fff;outline:1px solid #fff;pointer-events:auto}.w2ui-ss .w2ui-grid-body .w2ui-selection.w2ui-inactive{border:1.5px solid #c0c2c5}.w2ui-ss .w2ui-grid-body .w2ui-selection.w2ui-inactive .w2ui-selection-resizer{background-color:#b0b0b0}.w2ui-ss .w2ui-grid-body .w2ui-soft-range{position:absolute;pointer-events:none;white-space:nowrap;overflow:hidden;text-overflow:ellipsis}.w2ui-ss .w2ui-grid-body .w2ui-changed{background:inherit}.w2ui-ss .w2ui-grid-body .w2ui-editable input{outline:0!important}.w2ui-info-bubble table{font-family:OpenSans;font-size:12px;color:#fff;text-shadow:1px 1px solid #999}.w2ui-info-bubble table tr td:first-child{white-space:nowrap;padding:2px;padding-right:10px;color:#ddd;vertical-align:top}.w2ui-info-bubble table tr td:last-child{white-space:pre;padding:2px}.w2ui-overlay .w2ui-grid-search-suggest{border-top-left-radius:5px;border-top-right-radius:5px;padding:10px;background-color:#fff;border-bottom:1px solid #e6e6e6;color:#444}.w2ui-overlay .w2ui-grid-search-single{font-size:12px;padding-top:10px}.w2ui-overlay .w2ui-grid-search-single .field{white-space:nowrap;text-overflow:ellipsis;overflow:hidden;border:1px solid #a9b6c2;border-radius:4px;padding:4px 12px;margin:0 2px;color:#4295d4;background-color:#f5f9fe}.w2ui-overlay .w2ui-grid-search-single .operator{display:inline-block;color:#000;background-color:#e6e6e6;border-radius:4px;margin:0 4px;padding:6px 10px}.w2ui-overlay .w2ui-grid-search-single .value{white-space:nowrap;text-overflow:ellipsis;overflow:hidden;border:1px solid #a9b6c2;border-radius:4px;margin:0 2px;padding:4px 12px}.w2ui-overlay .w2ui-grid-search-single .buttons{text-align:left;padding:15px 10px 10px 0}.w2ui-overlay .w2ui-grid-search-advanced{text-align:left;padding:0;background-color:#fff;text-shadow:none;border:1px solid #cdcdd8;box-shadow:0 3px 14px 1px #e8e8e8}.w2ui-overlay .w2ui-grid-search-advanced .search-title{padding:20px 0 9px 20px;font-size:17px;font-weight:700;color:#555}.w2ui-overlay .w2ui-grid-search-advanced .search-title .search-logic{float:right;padding-right:10px}.w2ui-overlay .w2ui-grid-search-advanced table{color:#5f5f5f;font-size:13px;padding:12px 4px 0 4px}.w2ui-overlay .w2ui-grid-search-advanced table td{padding:4px;min-height:40px}.w2ui-overlay .w2ui-grid-search-advanced table td.caption{text-align:right;padding-right:5px;padding-left:20px}.w2ui-overlay .w2ui-grid-search-advanced table td.operator{text-align:left;padding:5px}.w2ui-overlay .w2ui-grid-search-advanced table td.operator select{width:100%;color:#000}.w2ui-overlay .w2ui-grid-search-advanced table td.value{padding-right:5px;padding-left:5px}.w2ui-overlay .w2ui-grid-search-advanced table td.value input[type=text]{border-radius:3px;padding:5px;margin-right:3px;height:28px}.w2ui-overlay .w2ui-grid-search-advanced table td.value select{padding:0 20px 5px 5px;margin-right:3px;height:28px}.w2ui-overlay .w2ui-grid-search-advanced table td.actions:nth-child(1){padding:25px 10px 10px 10px;text-align:left}.w2ui-overlay .w2ui-grid-search-advanced table td.actions:nth-child(2){padding:25px 10px 10px 10px;text-align:right;background-color:#fff}.w2ui-grid-skip{width:50px;margin:-6px 
3px;padding:3px!important}.w2ui-popup{position:fixed;z-index:1600;overflow:hidden;font-family:OpenSans;border-radius:6px;padding:0;margin:0;border:1px solid #777;background-color:#fafafa;box-shadow:0 0 25px #555}.w2ui-popup,.w2ui-popup *{box-sizing:border-box}.w2ui-popup.w2ui-anim-open{opacity:0;transform:scale(.8)}.w2ui-popup.w2ui-anim-close{opacity:0;transform:scale(.9)}.w2ui-popup .w2ui-popup-title{padding:10px;border-radius:6px 6px 0 0;background-color:#fff;border-bottom:1px solid #eee;position:absolute;overflow:hidden;height:42px;left:0;right:0;top:0;text-overflow:ellipsis;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none;cursor:move;font-size:17px;color:#555;z-index:300}.w2ui-popup .w2ui-popup-button{float:right;width:25px;height:23px;cursor:pointer;color:#888;margin:0 0 0 5px}.w2ui-popup .w2ui-popup-button span.w2ui-icon{width:24px;height:23px}.w2ui-popup .w2ui-popup-button.w2ui-popup-close:hover{color:#222}.w2ui-popup .w2ui-popup-button.w2ui-popup-max:hover{color:#222}.w2ui-popup .w2ui-box,.w2ui-popup .w2ui-box-temp{position:absolute;left:0;right:0;top:42px;bottom:58px;z-index:100}.w2ui-popup .w2ui-popup-body{font-size:12px;line-height:130%;padding:0 7px 7px 7px;color:#000;background-color:#fafafa;position:absolute;overflow:auto;width:100%;height:100%}.w2ui-popup .w2ui-popup-buttons{font-size:11px;padding:14px;border-radius:0 0 6px 6px;border-top:1px solid #eee;background-color:#fff;text-align:center;position:absolute;overflow:hidden;height:56px;left:0;right:0;bottom:0;z-index:200}.w2ui-popup .w2ui-popup-no-title{border-top-left-radius:6px;border-top-right-radius:6px;top:0}.w2ui-popup .w2ui-popup-no-buttons{border-bottom-left-radius:6px;border-bottom-right-radius:6px;bottom:0}.w2ui-popup .w2ui-msg-text{font-size:14px;line-height:1.5}.w2ui-popup .w2ui-prompt{font-size:12px;padding:0 10px}.w2ui-popup .w2ui-prompt.textarea{margin-top:20px}.w2ui-popup .w2ui-prompt>div{margin-bottom:5px}.w2ui-popup .w2ui-prompt>label{margin-right:5px}.w2ui-popup .w2ui-prompt input{width:230px}.w2ui-popup .w2ui-prompt textarea{width:100%;height:50px;resize:none}.w2ui-message{font-size:12px;position:absolute;z-index:250;background-color:#fcfcfc;border:1px solid #999;box-shadow:0 0 15px #aaa;box-sizing:border-box;border-top:0;border-radius:0 0 6px 6px;overflow:auto}.w2ui-message .w2ui-msg-text{font-size:14px;line-height:1.5}.w2ui-message .w2ui-message-body{position:absolute;top:0;bottom:45px;left:0;right:0;overflow:auto}.w2ui-message .w2ui-message-body .w2ui-centered{line-height:1.5}.w2ui-message .w2ui-message-buttons{position:absolute;height:45px;bottom:0;left:0;right:0;border-top:1px solid #efefef;background-color:#fff;text-align:center;padding:8px}.w2ui-sidebar{position:relative;cursor:default;overflow:hidden;background-color:#fbfbfb;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;-ms-box-sizing:border-box;-o-box-sizing:border-box;box-sizing:border-box}.w2ui-sidebar *{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;-ms-box-sizing:border-box;-o-box-sizing:border-box;box-sizing:border-box}.w2ui-sidebar>div{position:absolute;overflow:hidden}.w2ui-sidebar .w2ui-sidebar-top{position:absolute;z-index:2;top:0;left:0;right:0}.w2ui-sidebar .w2ui-sidebar-top .w2ui-flat-left,.w2ui-sidebar .w2ui-sidebar-top .w2ui-flat-right{position:absolute;right:2px;top:2px;height:24px;padding:5px;border-radius:2px;background-size:16px 12px;background-position:center center;background-repeat:no-repeat;background-color:#fbfbfb}.w2ui-sidebar .w2ui-sidebar-top 
.w2ui-flat-left:hover,.w2ui-sidebar .w2ui-sidebar-top .w2ui-flat-right:hover{background-color:#f1f1f1}.w2ui-sidebar .w2ui-sidebar-top .w2ui-flat-left{left:auto;width:25px;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAQAAADZc7J/AAAAzklEQVR4Ae2THRDEMBCFzy1ucatb3eJ2uhi3uNUtbnGrW9zi1rOdNzdvdl7nDpvYt/9/r7+/51myZZf/zXkD2iMHHRSb0x3oskwMieK05PwEXqP4ExSL0wp0ROao2OOuMPOMdUL6XU1/oGLcFWb+NqyTd2W/P/2qTr9h+nFXhOkHXRHiNyjrgp/U/V+WaQcaNY13zZI0A1JvcVqAnrGDTdtDtZUHjHIJhxxVLN0iqXgCP1l/7h8U9kc6abyJ4/eNWPpGdBv+XdUK0K8cnvcBly2rDr7C1HQAAAAASUVORK5CYII=)}.w2ui-sidebar .w2ui-sidebar-top .w2ui-flat-right{left:2px;width:auto;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAQAAADZc7J/AAAAz0lEQVR4Ae2UG7TGMBCEr1vd4la3uMUtuli3utWtbnGLW9zi9l/bDMzJG7u12cfJfLunf1+UEC9Bv0vVQwJ8hjRCaZafflb1C9RQf4OD0gSDE+i+PiJAabFhQc1y1AYYsJGLY3lgxM17uWPO56yPiFDqVPWgRtpIHSd1zPnwkBsdI58OlNwx4fP2X0TgfMTOoHSdKOXkpyNvEyQh7ul+4swxJSTQuwNDxz68l/ukVNbu0Neen5Z+KvzWxBAqHds349uPFJ/jVOrPjxUq++OLf+20q5+noXo0AAAAAElFTkSuQmCC)}.w2ui-sidebar .w2ui-sidebar-bottom{position:absolute;z-index:2;bottom:0;left:0;right:0}.w2ui-sidebar .w2ui-sidebar-body{position:absolute;z-index:1;overflow:auto;top:0;bottom:0;left:0;right:0;padding:2px 0;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node{position:relative;border-radius:4px;margin:0 3px;padding:1px 0;border:1px solid transparent}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node .w2ui-node-text{color:#000;text-shadow:0 0 0 #fff;pointer-events:none}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node .w2ui-node-text:hover{color:inherit}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node .w2ui-node-image>span{color:#737485}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node .w2ui-node-handle{display:inline-block;padding:0;margin:0;height:100%;position:absolute}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node:hover{background-color:#f1f1f1}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node .w2ui-node-image{width:22px;text-align:center;pointer-events:none}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node .w2ui-node-image>span{color:#888}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node.w2ui-disabled,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node.w2ui-disabled:hover{background:0 0}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node.w2ui-disabled .w2ui-node-image,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node.w2ui-disabled .w2ui-node-image>span,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node.w2ui-disabled .w2ui-node-text,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node.w2ui-disabled:hover .w2ui-node-image,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node.w2ui-disabled:hover .w2ui-node-image>span,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node.w2ui-disabled:hover .w2ui-node-text{opacity:.4;color:#000;text-shadow:0 0 0 #fff}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node button,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node input{pointer-events:auto}.w2ui-sidebar .w2ui-sidebar-body .w2ui-selected,.w2ui-sidebar .w2ui-sidebar-body .w2ui-selected:hover{background-color:#f3f5ff;position:relative;border:1px solid #dee1ff}.w2ui-sidebar .w2ui-sidebar-body .w2ui-selected .w2ui-node-image,.w2ui-sidebar .w2ui-sidebar-body .w2ui-selected .w2ui-node-image>span,.w2ui-sidebar .w2ui-sidebar-body .w2ui-selected .w2ui-node-text,.w2ui-sidebar .w2ui-sidebar-body .w2ui-selected:hover .w2ui-node-image,.w2ui-sidebar .w2ui-sidebar-body .w2ui-selected:hover .w2ui-node-image>span,.w2ui-sidebar .w2ui-sidebar-body .w2ui-selected:hover .w2ui-node-text{color:inherit;text-shadow:0 0 0 
#fff}.w2ui-sidebar .w2ui-sidebar-body .w2ui-selected:before{content:"";border:1px dashed transparent;border-radius:4px;position:absolute;top:-1px;bottom:-1px;left:-1px;right:-1px;pointer-events:none}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-text{white-space:nowrap;padding:5px 0 5px 3px;margin:1px 0 1px 22px;position:relative;z-index:1;font-size:12px}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-group{white-space:nowrap;overflow:hidden;padding:10px 0 10px 10px;margin:0;cursor:default;color:#6a5e88;background-color:transparent}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-group :nth-child(1){margin-right:10px;float:right;color:transparent}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-group :nth-child(2){font-weight:400;text-transform:uppercase}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-sub{overflow:hidden}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data{padding:2px}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-node-image{padding:3px 0 0 0;float:left}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-node-image>span{font-size:16px;color:#737485;text-shadow:0 0 0 #fff}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-node-image.w2ui-icon{margin-top:3px}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-node-count{float:right;border:1px solid #f6fcf4;border-radius:20px;width:auto;padding:2px 7px;margin:3px 4px -2px 0;background-color:#f2f8f0;color:#666;box-shadow:0 0 2px #474545;text-shadow:1px 1px 0 #fff;position:relative;z-index:2}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-collapsed,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-expanded{float:right;width:auto;height:18px;position:relative;z-index:2}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-collapsed span,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-expanded span{border-color:transparent;background-color:transparent;box-shadow:none;padding:2px 5px;border-radius:0}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-collapsed span:after,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-expanded span:after{content:"";position:absolute;border-left:5px solid grey;border-top:5px solid transparent;border-bottom:5px solid transparent;transform:rotateZ(-90deg);pointer-events:none;margin-left:-4px;margin-top:7px}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-collapsed span:hover,.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-expanded span:hover{border-color:transparent;background-color:transparent}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-data .w2ui-collapsed span:after{transform:rotateZ(90deg)}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-flat{display:block;padding:2px 0;text-align:center}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-flat .w2ui-node-image{float:none;text-align:center;width:auto;padding:1px 0}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-flat .w2ui-node-image>span{font-size:16px;color:#737485;text-shadow:0 0 0 #fff}.w2ui-sidebar .w2ui-sidebar-body .w2ui-node-flat .w2ui-node-image.w2ui-icon{width:21px}.w2ui-tabs{cursor:default;overflow:hidden;position:relative;background-color:#fff;min-height:28px;padding:0;margin:0}.w2ui-tabs .w2ui-tabs-line{position:absolute;left:0;right:0;bottom:0;z-index:1;border:0;height:1px;background-color:#e2e2e2}.w2ui-tabs .w2ui-scroll-left,.w2ui-tabs .w2ui-scroll-right{z-index:30;display:flex}.w2ui-tabs .w2ui-scroll-wrapper{display:flex;flex-direction:row;flex-wrap:nowrap;justify-content:flex-start;align-content:flex-start;padding:0 2px}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tab{height:28px;position:relative;z-index:20;padding:7px 
20px 4px 20px;text-align:center;color:#000;background-color:transparent;border:2px solid transparent;white-space:nowrap;margin:0 1px;border-radius:0;cursor:default;user-select:none}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tab.active{color:#0175ff;background-color:transparent;border:2px solid transparent;border-bottom:2px solid #0175ff;margin-bottom:0}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tab:hover{background-color:#dfe1e630}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tab.moving{color:inherit;background-color:#eee;border:2px solid transparent;border-radius:0;margin-bottom:0}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tab.closable{padding:6px 28px 6px 20px}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tab .w2ui-tab-close{position:absolute;right:3px;top:5px;color:#555;float:right;margin-top:-3px;padding:2px 4px;width:20px;height:20px;opacity:.6;border:0;border-top:3px solid transparent;border-radius:3px}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tab .w2ui-tab-close:hover{background-color:#f9e7e7;color:red;opacity:1;font-weight:700}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tab .w2ui-tab-close:active{background-color:#ffd1d1}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tab .w2ui-tab-close:before{position:relative;top:-2px;left:0;color:inherit;text-shadow:inherit;content:'x'}.w2ui-tabs .w2ui-scroll-wrapper .w2ui-tabs-right{padding:8px 2px;width:100%;text-align:right;white-space:nowrap}.w2ui-tabs.w2ui-tabs-up .w2ui-tabs-line{top:0;bottom:auto}.w2ui-tabs.w2ui-tabs-up .w2ui-scroll-wrapper .w2ui-tab{border:2px solid transparent;border-top:2px solid transparent;border-radius:0 0 4px 4px}.w2ui-tabs.w2ui-tabs-up .w2ui-scroll-wrapper .w2ui-tab.active{border:2px solid transparent;border-top:2px solid #0175ff;margin-top:0}.w2ui-toolbar{background-color:#f5f5f5;user-select:none;padding:2px}.w2ui-toolbar .w2ui-tb-line{overflow:hidden;position:relative;min-height:28px;padding:2px;margin:0}.w2ui-toolbar .disabled{opacity:.3}.w2ui-toolbar .w2ui-scroll-left,.w2ui-toolbar .w2ui-scroll-right{z-index:30;display:flex}.w2ui-toolbar .w2ui-tb-line:nth-child(2),.w2ui-toolbar .w2ui-tb-line:nth-child(3),.w2ui-toolbar .w2ui-tb-line:nth-child(4){border-top:1px solid #e7e7e7;padding-top:4px;margin:0}.w2ui-toolbar .w2ui-scroll-wrapper{display:flex;flex-direction:row;flex-wrap:nowrap;justify-content:flex-start;align-content:flex-start;padding:0}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button{position:relative;z-index:20;height:30px;min-width:30px;padding:2px;border:1px solid transparent;border-radius:4px;background-color:transparent;white-space:nowrap;margin:0 1px;cursor:default;user-select:none;flex-shrink:0}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button .w2ui-tb-icon{float:left;width:22px;margin:4px 0 0 1px;text-align:center}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button .w2ui-tb-icon>span{font-size:15px;color:#8d99a7}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button .w2ui-tb-text{margin-left:20px;color:#000;padding:5px}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button .w2ui-tb-text .w2ui-tb-color-box{display:inline-block;height:13px;width:13px;margin:0 -1px -2px 0;border-radius:1px;border:1px solid #fff;box-shadow:0 0 1px #555}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button .w2ui-tb-text .w2ui-tb-count{padding:0 0 0 4px}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button .w2ui-tb-text .w2ui-tb-count>span{border:1px solid #f6fcf4;border-radius:11px;width:auto;height:18px;padding:0 6px 1px 6px;background-color:#f2f8f0;color:#666;box-shadow:0 0 2px #474545;text-shadow:1px 1px 0 #fff}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button .w2ui-tb-text 
.w2ui-tb-down{display:inline-block;width:10px}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button .w2ui-tb-text .w2ui-tb-down>span{display:inline-block;position:relative;top:3px;left:3px;border:4px solid transparent;border-top:5px solid #8d99a7}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button.over{border:1px solid transparent;background-color:#eaeaed}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button.over .w2ui-tb-text{color:#000}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button.checked{border:1px solid #d2d2d2;background-color:#fff}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button.checked .w2ui-tb-text{color:#000}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button.down{border:1px solid #ccc;background-color:#eaeaed}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button.down .w2ui-tb-text{color:#666}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-button.no-icon .w2ui-tb-text{margin-left:0}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-right{width:100%;text-align:right;white-space:nowrap}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-break{background-image:linear-gradient(to bottom,rgba(153,153,153,.1) 0,#999 40%,#999 60%,rgba(153,153,153,.1) 100%);width:1px;height:24px;padding:0;margin:3px 6px}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-html{white-space:nowrap}.w2ui-toolbar .w2ui-scroll-wrapper .w2ui-tb-spacer{width:100%} \ No newline at end of file diff --git "a/spaces/Gmq-x/gpt-academic/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" "b/spaces/Gmq-x/gpt-academic/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" deleted file mode 100644 index f1fe20171cc54aec0c79f4961e71b57845f252d5..0000000000000000000000000000000000000000 --- "a/spaces/Gmq-x/gpt-academic/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" +++ /dev/null @@ -1,127 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -fast_debug = False - - -def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, os - # pip install python-docx 用于docx格式,跨平台 - # pip install pywin32 用于doc格式,仅支持Win平台 - for index, fp in enumerate(file_manifest): - if fp.split(".")[-1] == "docx": - from docx import Document - doc = Document(fp) - file_content = "\n".join([para.text for para in doc.paragraphs]) - else: - import win32com.client - word = win32com.client.Dispatch("Word.Application") - word.visible = False - # 打开文件 - print('fp', os.getcwd()) - doc = word.Documents.Open(os.getcwd() + '/' + fp) - # file_content = doc.Content.Text - doc = word.ActiveDocument - file_content = doc.Range().Text - doc.Close() - word.Quit() - - print(file_content) - # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名 - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info - max_token = model_info[llm_kwargs['llm_model']]['max_token'] - TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4 - paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( - txt=file_content, - get_token_fn=model_info[llm_kwargs['llm_model']]['token_cnt'], - limit=TOKEN_LIMIT_PER_FRAGMENT - ) - this_paper_history = [] - for i, paper_frag in enumerate(paper_fragments): - i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```' - i_say_show_user = f'请对下面的文章片段做概述: {os.path.abspath(fp)}的第{i+1}/{len(paper_fragments)}个片段。' - gpt_say = yield from 
request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt="总结文章。" - ) - - chatbot[-1] = (i_say_show_user, gpt_say) - history.extend([i_say_show_user,gpt_say]) - this_paper_history.extend([i_say_show_user,gpt_say]) - - # 已经对该文章的所有片段总结完毕,如果文章被切分了, - if len(paper_fragments) > 1: - i_say = f"根据以上的对话,总结文章{os.path.abspath(fp)}的主要内容。" - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=this_paper_history, - sys_prompt="总结文章。" - ) - - history.extend([i_say,gpt_say]) - this_paper_history.extend([i_say,gpt_say]) - - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - res = write_results_to_file(history) - chatbot.append(("所有文件都总结完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - -@CatchException -def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量总结Word文档。函数插件贡献者: JasonGuo1"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - from docx import Document - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - if txt.endswith('.docx') or txt.endswith('.doc'): - file_manifest = [txt] - else: - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.docx', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.doc', recursive=True)] - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - yield from 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/detr/detr_r50_8x2_150e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/detr/detr_r50_8x2_150e_coco.py deleted file mode 100644 index ba276f447c2a858f6ae454fdd1cb0c95c831092c..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/detr/detr_r50_8x2_150e_coco.py +++ /dev/null @@ -1,131 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' -] -model = dict( - type='DETR', - pretrained='torchvision://resnet50', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(3, ), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch'), - bbox_head=dict( - type='TransformerHead', - num_classes=80, - in_channels=2048, - num_fcs=2, - transformer=dict( - type='Transformer', - embed_dims=256, - num_heads=8, - num_encoder_layers=6, - num_decoder_layers=6, - feedforward_channels=2048, - dropout=0.1, - act_cfg=dict(type='ReLU', 
inplace=True), - norm_cfg=dict(type='LN'), - num_fcs=2, - pre_norm=False, - return_intermediate_dec=True), - positional_encoding=dict( - type='SinePositionalEncoding', num_feats=128, normalize=True), - loss_cls=dict( - type='CrossEntropyLoss', - bg_cls_weight=0.1, - use_sigmoid=False, - loss_weight=1.0, - class_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='ClassificationCost', weight=1.), - reg_cost=dict(type='BBoxL1Cost', weight=5.0), - iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), - test_cfg=dict(max_per_img=100)) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different -# from the default setting in mmdet. -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[[ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ]]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -# test_pipeline, NOTE the Pad's size_divisor is different from the default -# setting (size_divisor=32). While there is little effect on the performance -# whether we use the default setting or use size_divisor=1. 
-test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - type='AdamW', - lr=0.0001, - weight_decay=0.0001, - paramwise_cfg=dict( - custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) -optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[100]) -runner = dict(type='EpochBasedRunner', max_epochs=150) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py deleted file mode 100644 index da89e09c03078d00679d3e9caf5181bbec60a332..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py +++ /dev/null @@ -1,42 +0,0 @@ -_base_ = './htc_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) -# dataset settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), - dict( - type='Resize', - img_scale=[(1600, 400), (1600, 1400)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='SegRescale', scale_factor=1 / 8), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -data = dict( - samples_per_gpu=1, workers_per_gpu=1, train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/core/seg/__init__.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/core/seg/__init__.py deleted file mode 100644 index 93bc129b685e4a3efca2cc891729981b2865900d..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/core/seg/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .builder import build_pixel_sampler -from .sampler import BasePixelSampler, OHEMPixelSampler - -__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] diff --git a/spaces/Grassss/nb/README.md b/spaces/Grassss/nb/README.md deleted file mode 100644 index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000 --- a/spaces/Grassss/nb/README.md +++ /dev/null 
@@ -1,28 +0,0 @@ ---- -title: bingo -emoji: 😊 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
- -# Bingo - -Bingo, a New Bing that lets you breathe easy. - -A faithful recreation of the main features of the New Bing web UI, usable inside mainland China, compatible with most of Microsoft Bing AI's features, and ready for self-hosting. - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Github issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -For issues and feedback, please go to https://github.com/weaigc/bingo/issues -
- - diff --git a/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifigan/utils.py b/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifigan/utils.py deleted file mode 100644 index 9c93c996d3cc73c30d71c1fc47056e4230f35c0f..0000000000000000000000000000000000000000 --- a/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifigan/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import glob -import os -import matplotlib -import torch -from torch.nn.utils import weight_norm -# matplotlib.use("Agg") -import matplotlib.pylab as plt - - -def plot_spectrogram(spectrogram): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def apply_weight_norm(m): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - weight_norm(m) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print("Loading '{}'".format(filepath)) - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def save_checkpoint(filepath, obj): - print("Saving checkpoint to {}".format(filepath)) - torch.save(obj, filepath) - print("Complete.") - - -def del_old_checkpoints(cp_dir, prefix, n_models=2): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) # get checkpoint paths - cp_list = sorted(cp_list)# sort by iter - if len(cp_list) > n_models: # if more than n_models models are found - for cp in cp_list[:-n_models]:# delete the oldest models other than latest n_models - open(cp, 'w').close()# empty file contents - os.unlink(cp)# delete file (move to trash when using Colab) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return None - return sorted(cp_list)[-1] - diff --git a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/op/fused_bias_act.cpp b/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/op/fused_bias_act.cpp deleted file mode 100644 index 02be898f970bcc8ea297867fcaa4e71b24b3d949..0000000000000000000000000000000000000000 --- a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/op/fused_bias_act.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include <torch/extension.h> - - -torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, - int act, int grad, float alpha, float scale); - -#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) - -torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, - int act, int grad, float alpha, float scale) { - CHECK_CUDA(input); - CHECK_CUDA(bias); - - return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); -} \ No newline at end of file diff --git a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/segmodel/resnext.py
b/spaces/HaHaBill/LandShapes-Antarctica/netdissect/segmodel/resnext.py deleted file mode 100644 index cdbb7461a6c8eb126717967cdca5d5ce392aecea..0000000000000000000000000000000000000000 --- a/spaces/HaHaBill/LandShapes-Antarctica/netdissect/segmodel/resnext.py +++ /dev/null @@ -1,182 +0,0 @@ -import os -import sys -import torch -import torch.nn as nn -import math -try: - from lib.nn import SynchronizedBatchNorm2d -except ImportError: - from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d -try: - from urllib import urlretrieve -except ImportError: - from urllib.request import urlretrieve - - -__all__ = ['ResNeXt', 'resnext101'] # support resnext 101 - - -model_urls = { - #'resnext50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnext50-imagenet.pth', - 'resnext101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnext101-imagenet.pth' -} - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class GroupBottleneck(nn.Module): - expansion = 2 - - def __init__(self, inplanes, planes, stride=1, groups=1, downsample=None): - super(GroupBottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = SynchronizedBatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, - padding=1, groups=groups, bias=False) - self.bn2 = SynchronizedBatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=False) - self.bn3 = SynchronizedBatchNorm2d(planes * 2) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNeXt(nn.Module): - - def __init__(self, block, layers, groups=32, num_classes=1000): - self.inplanes = 128 - super(ResNeXt, self).__init__() - self.conv1 = conv3x3(3, 64, stride=2) - self.bn1 = SynchronizedBatchNorm2d(64) - self.relu1 = nn.ReLU(inplace=True) - self.conv2 = conv3x3(64, 64) - self.bn2 = SynchronizedBatchNorm2d(64) - self.relu2 = nn.ReLU(inplace=True) - self.conv3 = conv3x3(64, 128) - self.bn3 = SynchronizedBatchNorm2d(128) - self.relu3 = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.layer1 = self._make_layer(block, 128, layers[0], groups=groups) - self.layer2 = self._make_layer(block, 256, layers[1], stride=2, groups=groups) - self.layer3 = self._make_layer(block, 512, layers[2], stride=2, groups=groups) - self.layer4 = self._make_layer(block, 1024, layers[3], stride=2, groups=groups) - self.avgpool = nn.AvgPool2d(7, stride=1) - self.fc = nn.Linear(1024 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels // m.groups - m.weight.data.normal_(0, math.sqrt(2. 
/ n)) - elif isinstance(m, SynchronizedBatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1, groups=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - SynchronizedBatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, groups, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, groups=groups)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.relu1(self.bn1(self.conv1(x))) - x = self.relu2(self.bn2(self.conv2(x))) - x = self.relu3(self.bn3(self.conv3(x))) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - return x - - -''' -def resnext50(pretrained=False, **kwargs): - """Constructs a ResNet-50 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNeXt(GroupBottleneck, [3, 4, 6, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnext50']), strict=False) - return model -''' - - -def resnext101(pretrained=False, **kwargs): - """Constructs a ResNet-101 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNeXt(GroupBottleneck, [3, 4, 23, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnext101']), strict=False) - return model - - -# def resnext152(pretrained=False, **kwargs): -# """Constructs a ResNeXt-152 model. 
-# -# Args: -# pretrained (bool): If True, returns a model pre-trained on Places -# """ -# model = ResNeXt(GroupBottleneck, [3, 8, 36, 3], **kwargs) -# if pretrained: -# model.load_state_dict(load_url(model_urls['resnext152'])) -# return model - - -def load_url(url, model_dir='./pretrained', map_location=None): - if not os.path.exists(model_dir): - os.makedirs(model_dir) - filename = url.split('/')[-1] - cached_file = os.path.join(model_dir, filename) - if not os.path.exists(cached_file): - sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) - urlretrieve(url, cached_file) - return torch.load(cached_file, map_location=map_location) diff --git a/spaces/Hallucinate/demo/taming/modules/misc/coord.py b/spaces/Hallucinate/demo/taming/modules/misc/coord.py deleted file mode 100644 index ee69b0c897b6b382ae673622e420f55e494f5b09..0000000000000000000000000000000000000000 --- a/spaces/Hallucinate/demo/taming/modules/misc/coord.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch - -class CoordStage(object): - def __init__(self, n_embed, down_factor): - self.n_embed = n_embed - self.down_factor = down_factor - - def eval(self): - return self - - def encode(self, c): - """fake vqmodel interface""" - assert 0.0 <= c.min() and c.max() <= 1.0 - b,ch,h,w = c.shape - assert ch == 1 - - c = torch.nn.functional.interpolate(c, scale_factor=1/self.down_factor, - mode="area") - c = c.clamp(0.0, 1.0) - c = self.n_embed*c - c_quant = c.round() - c_ind = c_quant.to(dtype=torch.long) - - info = None, None, c_ind - return c_quant, None, info - - def decode(self, c): - c = c/self.n_embed - c = torch.nn.functional.interpolate(c, scale_factor=self.down_factor, - mode="nearest") - return c diff --git a/spaces/HamidRezaAttar/gpt2-home/utils.py b/spaces/HamidRezaAttar/gpt2-home/utils.py deleted file mode 100644 index d27918de6777f9d582b7bee37cd0f1b59a9ff2d2..0000000000000000000000000000000000000000 --- a/spaces/HamidRezaAttar/gpt2-home/utils.py +++ /dev/null @@ -1,36 +0,0 @@ -import streamlit as st -import json -from PIL import Image - - -def load_image(image_path, image_resize=None): - image = Image.open(image_path) - if isinstance(image_resize, tuple): - image.resize(image_resize) - return image - - -def load_text(text_path): - text = '' - with open(text_path) as f: - text = f.read() - - return text - - -def load_json(json_path): - jdata = '' - with open(json_path) as f: - jdata = json.load(f) - - return jdata - - -def local_css(css_path): - with open(css_path) as f: - st.markdown(f'', unsafe_allow_html=True) - - -def remote_css(css_url): - st.markdown(f'', unsafe_allow_html=True) - diff --git a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/monotonic_align/setup.py b/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/monotonic_align/setup.py deleted file mode 100644 index 3a3892f92e3fbb866e3111199a9a4cf1f88e3959..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/monotonic_align/setup.py +++ /dev/null @@ -1,23 +0,0 @@ -import numpy -from setuptools import Extension, find_packages -from distutils.core import setup -from Cython.Build import cythonize - - -_VERSION = "1.1" - - -ext_modules = cythonize( - "monotonic_align/core.pyx", - compiler_directives={"language_level": "3"}, -) - -setup( - name="monotonic_align", - ext_modules=ext_modules, - include_dirs=[numpy.get_include(), "monotonic_align"], - packages=find_packages(), - setup_requires=["numpy", "cython"], - install_requires=["numpy"], - version=_VERSION, -) diff --git 
a/spaces/Hc123/anime-remove-background/README.md b/spaces/Hc123/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/Hc123/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Hoodady/3DFuse/my/utils/event.py b/spaces/Hoodady/3DFuse/my/utils/event.py deleted file mode 100644 index 741ab144fef51eef800dc7a03208059675ee8860..0000000000000000000000000000000000000000 --- a/spaces/Hoodady/3DFuse/my/utils/event.py +++ /dev/null @@ -1,143 +0,0 @@ -# design inspiration from detectron2 -from pathlib import Path -import json -import os -from contextlib import contextmanager -from .ticker import IntervalTicker - - -_CURRENT_STORAGE_STACK = [] - - -def get_event_storage(): - """ - Returns: - The :class:`EventStorage` object that's currently being used. - Throws an error if no :class:`EventStorage` is currently enabled. - """ - assert len( - _CURRENT_STORAGE_STACK - ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" - return _CURRENT_STORAGE_STACK[-1] - - -def read_lined_json(fname): - with Path(fname).open('r') as f: - for line in f: - item = json.loads(line) - yield item - - -def read_stats(dirname, key): - if dirname is None or not (fname := Path(dirname) / "history.json").is_file(): - return [], [] - stats = read_lined_json(fname) - stats = list(filter(lambda x: key in x, stats)) - xs = [e['iter'] for e in stats] - ys = [e[key] for e in stats] - return xs, ys - - -class EventStorage(): - def __init__(self, output_dir="./hotdog", start_iter=0, flush_period=60): - self.iter = start_iter - self.ticker = IntervalTicker(flush_period) - self.history = [] - self._current_prefix = "" - self._init_curr_buffer_() - - self.output_dir = output_dir - self.writable = False - - def _open(self): - if self.writable: - output_dir = Path(self.output_dir) - if not output_dir.is_dir(): - output_dir.mkdir(parents=True, exist_ok=True) - json_fname = output_dir / 'history.json' - - self._file_handle = json_fname.open('a', encoding='utf8') - self.output_dir = output_dir # make sure it's a path object - - def _init_curr_buffer_(self): - self.curr_buffer = {'iter': self.iter} - - def step(self, flush=False): - self.history.append(self.curr_buffer) - - on_flush_period = self.ticker.tick() - if flush or on_flush_period: - self.flush_history() - - self.iter += 1 - self._init_curr_buffer_() - - def flush_history(self): - if self.writable: - for item in self.history: - line = json.dumps(item, sort_keys=True, ensure_ascii=False) + "\n" - self._file_handle.write(line) - self._file_handle.flush() - self.history = [] - - def full_key(self, key): - assert isinstance(key, str) - name = self._current_prefix + key - return name - - def put(self, key, val): - key = self.full_key(key) - assert isinstance(val, (int, float, str)) - if isinstance(val, float): - val = round(val, 3) - self.curr_buffer[key] = val - - def put_scalars(self, **kwargs): - for k, v in kwargs.items(): - self.put(k, v) - - def put_artifact(self, key, ext,p, save_func): - if not self.writable: - return - p=p.replace(" ","_") - os.makedirs(self.output_dir / key, exist_ok=True) - 
fname = (self.output_dir / key / f"step_{self.iter}_{p}").with_suffix(ext) - fname = str(fname) - - # must be called inside so that - # 1. the func is not executed if the metric is not writable - # 2. the key is only inserted if the func succeeds - save_func(fname) - self.put(key, fname) - return fname - - def close(self): - self.flush_history() - if self.writable: - self._file_handle.close() - - def get_last(self): - if len(self.history) > 0: - last = self.history[-1] - return last - - def __enter__(self): - if len(_CURRENT_STORAGE_STACK) > 0: - parent = _CURRENT_STORAGE_STACK[-1] - root, dirname = parent.output_dir, self.output_dir - if root is not None and dirname is not None: - child_dir = parent.output_dir / f"{self.output_dir}_{parent.iter}" - self.output_dir = child_dir - parent.put(str(dirname), str(child_dir)) - - if self.output_dir is not None: - self.writable = True - self._open() - - _CURRENT_STORAGE_STACK.append(self) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - assert _CURRENT_STORAGE_STACK[-1] == self - _CURRENT_STORAGE_STACK.pop() - self.close() diff --git a/spaces/ICML2022/OFA/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py b/spaces/ICML2022/OFA/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py deleted file mode 100644 index 5c7b67f8b1967ca515c5f7606253b46f903ea37e..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import os -import sys - -import fairseq -import soundfile as sf -import torch -import torch.nn.functional as F - -from feature_utils import get_path_iterator, dump_feature - - -logging.basicConfig( - format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=os.environ.get("LOGLEVEL", "INFO").upper(), - stream=sys.stdout, -) -logger = logging.getLogger("dump_hubert_feature") - - -class HubertFeatureReader(object): - def __init__(self, ckpt_path, layer, max_chunk=1600000): - ( - model, - cfg, - task, - ) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path]) - self.model = model[0].eval().cuda() - self.task = task - self.layer = layer - self.max_chunk = max_chunk - logger.info(f"TASK CONFIG:\n{self.task.cfg}") - logger.info(f" max_chunk = {self.max_chunk}") - - def read_audio(self, path, ref_len=None): - wav, sr = sf.read(path) - assert sr == self.task.cfg.sample_rate, sr - if wav.ndim == 2: - wav = wav.mean(-1) - assert wav.ndim == 1, wav.ndim - if ref_len is not None and abs(ref_len - len(wav)) > 160: - logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") - return wav - - def get_feats(self, path, ref_len=None): - x = self.read_audio(path, ref_len) - with torch.no_grad(): - x = torch.from_numpy(x).float().cuda() - if self.task.cfg.normalize: - x = F.layer_norm(x, x.shape) - x = x.view(1, -1) - - feat = [] - for start in range(0, x.size(1), self.max_chunk): - x_chunk = x[:, start: start + self.max_chunk] - feat_chunk, _ = self.model.extract_features( - source=x_chunk, - padding_mask=None, - mask=False, - output_layer=self.layer, - ) - feat.append(feat_chunk) - return torch.cat(feat, 1).squeeze(0) - - -def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk): - reader = HubertFeatureReader(ckpt_path, layer, max_chunk) - 
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) - dump_feature(reader, generator, num, split, nshard, rank, feat_dir) - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("tsv_dir") - parser.add_argument("split") - parser.add_argument("ckpt_path") - parser.add_argument("layer", type=int) - parser.add_argument("nshard", type=int) - parser.add_argument("rank", type=int) - parser.add_argument("feat_dir") - parser.add_argument("--max_chunk", type=int, default=1600000) - args = parser.parse_args() - logger.info(args) - - main(**vars(args)) diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/tasks/audio_finetuning.py b/spaces/ICML2022/OFA/fairseq/fairseq/tasks/audio_finetuning.py deleted file mode 100644 index 4ef87c604f00581f03075e9ebe10a43dd51d6e45..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/tasks/audio_finetuning.py +++ /dev/null @@ -1,346 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - -import logging -import os -import torch -import json - -from argparse import Namespace -from dataclasses import dataclass, field -from typing import Optional, Any - -from fairseq.data import AddTargetDataset, Dictionary, encoders -from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.configs import GenerationConfig -from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel - -from . import register_task -from .. import utils -from ..logging import metrics - - -logger = logging.getLogger(__name__) - - -class LabelEncoder(object): - def __init__(self, dictionary): - self.dictionary = dictionary - - def __call__(self, label): - return self.dictionary.encode_line( - label, append_eos=False, add_if_not_exist=False - ) - - -def label_len_fn(label): - return len(label.split(" ")) - - -@dataclass -class AudioFinetuningConfig(AudioPretrainingConfig): - # Options for reporting WER metrics during validation. 
Only applicable to - # Seq2Seq models during fine-tuning - eval_wer: bool = field( - default=False, metadata={"help": "compute WER for Seq2Seq models"} - ) - eval_wer_config: GenerationConfig = field( - default_factory=lambda: GenerationConfig(), - metadata={"help": "beam search config for evaluating wer during training"}, - ) - eval_wer_tokenizer: Any = field( - default=None, - metadata={"help": "tokenizer config for evaluating wer during training"}, - ) - eval_wer_post_process: str = field( - default="letter", - metadata={ - "help": "remove BPE tokens before scoring (can be sentencepiece, letter, and more)" - }, - ) - eval_bleu: bool = field( - default=False, metadata={"help": "evaluation with BLEU scores"} - ) - eval_bleu_detok: Optional[str] = field( - default=None, metadata={ - "help": "detokenize before computing BLEU (e.g., 'moses'); " - "required if using --eval-bleu; use 'space' to disable " - "detokenization; see fairseq.data.encoders for other options" - } - ) - eval_bleu_detok_args: str = field( - default="{}", - metadata={"help": "args for building the tokenizer, if needed"} - ) - eval_tokenized_bleu: bool = field( - default=False, - metadata={"help": "compute tokenized BLEU instead of sacrebleu"} - ) - eval_bleu_remove_bpe: Optional[str] = field( - default=None, metadata={"help": "remove BPE before computing BLEU"} - ) - eval_bleu_args: str = field( - default="{}", - metadata={"help": "generation args for BLUE scoring, e.g., " - "'{\"beam\": 4, \"lenpen\": 0.6}'"} - ) - eval_bleu_print_samples: bool = field( - default=False, - metadata={"help": "print sample generations during validation"} - ) - autoregressive: bool = field( - default=False, - metadata={ - "help": "required for autoregressive decoders (like seq2seq models); " - "adds 'prev_output_tokens' to input and appends eos to target" - }, - ) - - -@register_task("audio_finetuning", dataclass=AudioFinetuningConfig) -class AudioFinetuningTask(AudioPretrainingTask): - """ """ - - cfg: AudioFinetuningConfig - - def __init__( - self, - cfg: AudioFinetuningConfig, - ): - super().__init__(cfg) - self.blank_symbol = "" - - self.state.add_factory("target_dictionary", self.load_target_dictionary) - - def load_target_dictionary(self): - if self.cfg.labels: - dict_path = os.path.join(self.cfg.data, f"dict.{self.cfg.labels}.txt") - return Dictionary.load(dict_path) - return None - - def load_dataset(self, split: str, task_cfg: AudioFinetuningConfig = None, **kwargs): - super().load_dataset(split, task_cfg, **kwargs) - - task_cfg = task_cfg or self.cfg - assert task_cfg.labels is not None - text_compression_level = getattr( - TextCompressionLevel, str(self.cfg.text_compression_level) - ) - data_path = self.cfg.data - label_path = os.path.join(data_path, f"{split}.{task_cfg.labels}") - skipped_indices = getattr(self.datasets[split], "skipped_indices", set()) - text_compressor = TextCompressor(level=text_compression_level) - with open(label_path, "r") as f: - labels = [ - text_compressor.compress(l) - for i, l in enumerate(f) if i not in skipped_indices - ] - - assert len(labels) == len(self.datasets[split]), ( - f"labels length ({len(labels)}) and dataset length " - f"({len(self.datasets[split])}) do not match" - ) - - process_label = LabelEncoder(self.target_dictionary) - - self.datasets[split] = AddTargetDataset( - self.datasets[split], - labels, - pad=self.target_dictionary.pad(), - eos=self.target_dictionary.eos(), - batch_targets=True, - process_label=process_label, - label_len_fn=label_len_fn, - 
add_to_input=task_cfg.get("autoregressive", False), - text_compression_level=text_compression_level - ) - - @property - def target_dictionary(self): - """Return the :class:`~fairseq.data.Dictionary` for the language - model.""" - return self.state.target_dictionary - - def valid_step(self, sample, model, criterion): - loss, sample_size, logging_output = super().valid_step(sample, model, criterion) - if self.cfg.eval_wer and self.cfg.autoregressive: - metrics = self._inference_with_wer(self.sequence_generator, sample, model) - logging_output["_num_char_errors"] = metrics["num_char_errors"] - logging_output["_num_chars"] = metrics["num_chars"] - logging_output["_num_word_errors"] = metrics["num_word_errors"] - logging_output["_num_words"] = metrics["num_words"] - if self.cfg.eval_bleu and self.cfg.autoregressive: - metrics = self._inference_with_bleu(self.sequence_generator, sample, model) - logging_output['_bleu_sys_len'] = metrics.sys_len - logging_output['_bleu_ref_len'] = metrics.ref_len - # we split counts into separate entries so that they can be - # summed efficiently across workers using fast-stat-sync - assert len(metrics.counts) == 4 - for i in range(4): - logging_output[f"_bleu_counts_{i}"] = metrics.counts[i] - logging_output[f"_bleu_totals_{i}"] = metrics.totals[i] - return loss, sample_size, logging_output - - def build_model(self, model_cfg: FairseqDataclass): - model = super().build_model(model_cfg) - - if self.cfg.eval_wer and self.cfg.autoregressive: - self.sequence_generator = self.build_generator( - [model], - self.cfg.eval_wer_config, - ) - if self.cfg.eval_wer_tokenizer: - self.tokenizer = encoders.build_tokenizer(self.cfg.eval_wer_tokenizer) - else: - self.tokenizer = None - if self.cfg.eval_bleu and self.cfg.autoregressive: - assert self.cfg.eval_bleu_detok is not None, ( - '--eval-bleu-detok is required if using --eval-bleu; ' - 'try --eval-bleu-detok=moses (or --eval-bleu-detok=space ' - 'to disable detokenization, e.g., when using sentencepiece)' - ) - detok_args = json.loads(self.cfg.eval_bleu_detok_args) - self.tokenizer = encoders.build_tokenizer( - Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args) - ) - gen_args = json.loads(self.cfg.eval_bleu_args) - gen_args = Namespace(**gen_args) - self.sequence_generator = self.build_generator([model], gen_args) - - return model - - def _inference_with_wer(self, generator, sample, model): - import editdistance - - def decode(toks): - s = self.target_dictionary.string( - toks.int().cpu(), - self.cfg.eval_wer_post_process, - escape_unk=True, - ) - if self.tokenizer: - s = self.tokenizer.decode(s) - return s - - num_word_errors, num_char_errors = 0, 0 - num_chars, num_words = 0, 0 - gen_out = self.inference_step(generator, [model], sample, None) - for i in range(len(gen_out)): - hyp = decode(gen_out[i][0]["tokens"]) - ref = decode( - utils.strip_pad(sample["target"][i], self.target_dictionary.pad()), - ) - num_char_errors += editdistance.eval(hyp, ref) - num_chars += len(ref) - hyp_words = hyp.split() - ref_words = ref.split() - num_word_errors += editdistance.eval(hyp_words, ref_words) - num_words += len(ref_words) - - return { - "num_char_errors": num_char_errors, - "num_chars": num_chars, - "num_word_errors": num_word_errors, - "num_words": num_words, - } - - def _inference_with_bleu(self, generator, sample, model): - import sacrebleu - - def decode(toks, is_ref): - s = self.target_dictionary.string( - toks.int().cpu(), - self.cfg.eval_bleu_remove_bpe, - # The default unknown string in fairseq is ``, but - # this 
is tokenized by sacrebleu as `< unk >`, inflating - # BLEU scores. Instead, we use a somewhat more verbose - # alternative that is unlikely to appear in the real - # reference, but doesn't get split into multiple tokens. - unk_string=( - "UNKNOWNTOKENINREF" if is_ref else "UNKNOWNTOKENINHYP" - ), - ) - if self.tokenizer: - s = self.tokenizer.decode(s) - return s - - gen_out = self.inference_step(generator, [model], sample) - hyps, refs = [], [] - for i in range(len(gen_out)): - hyps.append(decode(gen_out[i][0]['tokens'], is_ref=False)) - refs.append( - decode( - utils.strip_pad( - sample['target'][i], - self.target_dictionary.pad() - ), - is_ref=True, # don't count as matches to the hypo - ) - ) - if self.cfg.eval_bleu_print_samples: - logger.info('H-{} {}'.format(sample["id"][0], hyps[0])) - logger.info('T-{} {}'.format(sample["id"][0], refs[0])) - - eval_tokenization = 'none' if self.cfg.eval_tokenized_bleu else '13a' - return sacrebleu.corpus_bleu(hyps, [refs], tokenize=eval_tokenization) - - def reduce_metrics(self, logging_outputs, criterion): - super().reduce_metrics(logging_outputs, criterion) - - if self.cfg.eval_wer: - zero = torch.scalar_tensor(0.0) - num_char_errors = sum( - log.get("_num_char_errors", zero) for log in logging_outputs - ) - num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs) - num_word_errors = sum( - log.get("_num_word_errors", zero) for log in logging_outputs - ) - num_words = sum(log.get("_num_words", zero) for log in logging_outputs) - metrics.log_scalar("_num_char_errors", num_char_errors) - metrics.log_scalar("_num_chars", num_chars) - metrics.log_scalar("_num_word_errors", num_word_errors) - metrics.log_scalar("_num_words", num_words) - if num_chars > 0: - metrics.log_derived( - "uer", - lambda meters: meters["_num_char_errors"].sum - * 100.0 - / meters["_num_chars"].sum - if meters["_num_chars"].sum > 0 - else float("nan"), - ) - if num_words > 0: - metrics.log_derived( - "wer", - lambda meters: meters["_num_word_errors"].sum - * 100.0 - / meters["_num_words"].sum - if meters["_num_words"].sum > 0 - else float("nan"), - ) - if self.cfg.eval_bleu: - len_keys = ["_bleu_sys_len", "_bleu_ref_len"] - count_keys = [f"_bleu_counts_{i}" for i in range(4)] - total_keys = [f"_bleu_totals_{i}" for i in range(4)] - for k in len_keys + count_keys + total_keys: - metrics.log_scalar( - k, sum(log.get(k, 0) for log in logging_outputs) - ) - - import sacrebleu - metrics.log_derived( - 'bleu', - lambda meters: sacrebleu.compute_bleu( - correct=[meters[k].sum for k in count_keys], - total=[meters[k].sum for k in total_keys], - sys_len=meters['_bleu_sys_len'].sum, - ref_len=meters['_bleu_ref_len'].sum, - smooth_method="exp" - ).score - ) diff --git a/spaces/IwanK/heart_failuere/app.py b/spaces/IwanK/heart_failuere/app.py deleted file mode 100644 index 4ac489093f1ea479623cc30e0715f666d4c51c04..0000000000000000000000000000000000000000 --- a/spaces/IwanK/heart_failuere/app.py +++ /dev/null @@ -1,16 +0,0 @@ -import streamlit as st -import eda -import predict - -st.set_page_config( - page_title= 'Heart Failure Prediction', - layout= 'wide', - initial_sidebar_state='expanded' -) - -navigation = st.sidebar.selectbox('Pilihan Halaman : ', ('EDA', 'Predic a Patient')) - -if navigation == 'EDA': - eda.run() -else: - predict.run() diff --git a/spaces/JMalott/ai_architecture/clip/clip.py b/spaces/JMalott/ai_architecture/clip/clip.py deleted file mode 100644 index 9f61f1bd1780508e6fee27f21e13cf3cc9d8eae5..0000000000000000000000000000000000000000 --- 
a/spaces/JMalott/ai_architecture/clip/clip.py +++ /dev/null @@ -1,231 +0,0 @@ -import hashlib -import os -import urllib -import warnings -from typing import Any, Union, List -from pkg_resources import packaging - -import torch -from PIL import Image -from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize -from tqdm import tqdm - -from .model import build_model -from .simple_tokenizer import SimpleTokenizer as _Tokenizer - -try: - from torchvision.transforms import InterpolationMode - BICUBIC = InterpolationMode.BICUBIC -except ImportError: - BICUBIC = Image.BICUBIC - - -if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): - warnings.warn("PyTorch version 1.7.1 or higher is recommended") - - -__all__ = ["available_models", "load", "tokenize"] -_tokenizer = _Tokenizer() - -_MODELS = { - "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", - "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", - "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", - "RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt", - "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", - "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", - "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt", -} - - -def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")): - os.makedirs(root, exist_ok=True) - filename = os.path.basename(url) - - expected_sha256 = url.split("/")[-2] - download_target = os.path.join(root, filename) - - if os.path.exists(download_target) and not os.path.isfile(download_target): - raise RuntimeError(f"{download_target} exists and is not a regular file") - - if os.path.isfile(download_target): - if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: - return download_target - else: - warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") - - with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: - with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) - - if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: - raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") - - return download_target - - -def _convert_image_to_rgb(image): - return image.convert("RGB") - - -def _transform(n_px): - return Compose([ - Resize(n_px, interpolation=BICUBIC), - CenterCrop(n_px), - _convert_image_to_rgb, - ToTensor(), - Normalize((0.48145466, 0.4578275, 
0.40821073), (0.26862954, 0.26130258, 0.27577711)), - ]) - - -def available_models() -> List[str]: - """Returns the names of available CLIP models""" - return list(_MODELS.keys()) - - -def load(name: str, device: Union[str, torch.device] = None, jit=False): - """Load a CLIP model - - Parameters - ---------- - name : str - A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict - - device : Union[str, torch.device] - The device to put the loaded model - - jit : bool - Whether to load the optimized JIT model or more hackable non-JIT model (default). - - Returns - ------- - model : torch.nn.Module - The CLIP model - - preprocess : Callable[[PIL.Image], torch.Tensor] - A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input - """ - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - if name in _MODELS: - model_path = _download(_MODELS[name]) - elif os.path.isfile(name): - model_path = name - else: - raise RuntimeError(f"Model {name} not found; available models = {available_models()}") - - try: - # loading JIT archive - model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() - state_dict = None - except RuntimeError: - # loading saved state dict - if jit: - warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead") - jit = False - state_dict = torch.load(model_path, map_location="cpu") - - if not jit: - model = build_model(state_dict or model.state_dict()).to(device) - if str(device) == "cpu": - model.float() - return model, _transform(model.visual.input_resolution) - - # patch the device names - device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) - device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] - - def patch_device(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("prim::Constant"): - if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): - node.copyAttributes(device_node) - - model.apply(patch_device) - patch_device(model.encode_image) - patch_device(model.encode_text) - - # patch dtype to float32 on CPU - if str(device) == "cpu": - float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) - float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] - float_node = float_input.node() - - def patch_float(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("aten::to"): - inputs = list(node.inputs()) - for i in [1, 2]: # dtype can be the second or third argument to aten::to() - if inputs[i].node()["value"] == 5: - inputs[i].node().copyAttributes(float_node) - - model.apply(patch_float) - patch_float(model.encode_image) - patch_float(model.encode_text) - - model.float() - - return model, _transform(model.input_resolution.item()) - - -def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor: - """ - Returns the tokenized representation of given input string(s) - - Parameters - ---------- - texts : 
Union[str, List[str]] - An input string or a list of input strings to tokenize - - context_length : int - The context length to use; all CLIP models use 77 as the context length - - truncate: bool - Whether to truncate the text in case its encoding is longer than the context length - - Returns - ------- - A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] - """ - if isinstance(texts, str): - texts = [texts] - - sot_token = _tokenizer.encoder["<|startoftext|>"] - eot_token = _tokenizer.encoder["<|endoftext|>"] - all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - - for i, tokens in enumerate(all_tokens): - if len(tokens) > context_length: - if truncate: - tokens = tokens[:context_length] - tokens[-1] = eot_token - else: - raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") - result[i, :len(tokens)] = torch.tensor(tokens) - - return result diff --git a/spaces/JUNGU/cartoonizer-demo-onnx-sota/app.py b/spaces/JUNGU/cartoonizer-demo-onnx-sota/app.py deleted file mode 100644 index 8c1ac83e1085402a42a07aa26c982c0390370458..0000000000000000000000000000000000000000 --- a/spaces/JUNGU/cartoonizer-demo-onnx-sota/app.py +++ /dev/null @@ -1,54 +0,0 @@ -import os - -import cv2 -import gradio as gr -import numpy as np -import onnxruntime as ort -from PIL import Image - -_sess_options = ort.SessionOptions() -_sess_options.intra_op_num_threads = os.cpu_count() -MODEL_SESS = ort.InferenceSession( - "cartoonizer.onnx", _sess_options, providers=["CPUExecutionProvider"] -) - - -def preprocess_image(image: Image) -> np.ndarray: - image = np.array(image) - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - - h, w, c = np.shape(image) - if min(h, w) > 720: - if h > w: - h, w = int(720 * h / w), 720 - else: - h, w = 720, int(720 * w / h) - image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA) - h, w = (h // 8) * 8, (w // 8) * 8 - image = image[:h, :w, :] - image = image.astype(np.float32) / 127.5 - 1 - return np.expand_dims(image, axis=0) - - -def inference(image: np.ndarray) -> Image: - image = preprocess_image(image) - results = MODEL_SESS.run(None, {"input_photo:0": image}) - output = (np.squeeze(results[0]) + 1.0) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB) - return Image.fromarray(output) - - -title = "Generate cartoonized images" -article = "Demo of CartoonGAN model (https://systemerrorwang.github.io/White-box-Cartoonization/). \nDemo image is from https://unsplash.com/photos/f0SgAs27BYI." - -iface = gr.Interface( - inference, - inputs=gr.inputs.Image(type="pil", label="Input Image"), - outputs="image", - title=title, - article=article, - allow_flagging="never", - examples=[["mountain.jpeg"]], -) -iface.launch() diff --git a/spaces/JacobLinCool/tiktoken-calculator/README.md b/spaces/JacobLinCool/tiktoken-calculator/README.md deleted file mode 100644 index 3348630c213f13b5c4a6161d9bd1e8e1c0836ca5..0000000000000000000000000000000000000000 --- a/spaces/JacobLinCool/tiktoken-calculator/README.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Tiktoken Calculator -emoji: 🤗 -colorFrom: yellow -colorTo: orange -sdk: gradio -python_version: 3.11 -app_file: app.py -pinned: false -tags: ["gpt", "tiktoken", "tokenizer"] ---- - -# Tiktoken Calculator - -Calculate the token count for GPT-4, GPT-3.5, GPT-3, and GPT-2. 
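As the note below says, this Space relies on openai/tiktoken for the count itself. A minimal sketch of that underlying calculation is shown here; the encoding names (`cl100k_base`, `r50k_base`, `gpt2`) follow OpenAI's published model-to-encoding mapping and the sample text is illustrative, so this is an assumption-based sketch rather than the Space's actual `app.py` logic.

```python
# Minimal sketch: count tokens for a few model families using tiktoken.
# Assumption: encoding names follow OpenAI's published model-to-encoding mapping;
# this is not the exact code used by this Space.
import tiktoken


def count_tokens(text: str, encoding_name: str = "cl100k_base") -> int:
    # Look up the tokenizer and count the encoded token ids.
    enc = tiktoken.get_encoding(encoding_name)
    return len(enc.encode(text))


if __name__ == "__main__":
    sample = "Calculate the token count for GPT-4, GPT-3.5, GPT-3, and GPT-2."
    print("cl100k_base (GPT-4 / GPT-3.5):", count_tokens(sample, "cl100k_base"))
    print("r50k_base (GPT-3):", count_tokens(sample, "r50k_base"))
    print("gpt2 (GPT-2):", count_tokens(sample, "gpt2"))
```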
- -> It uses [openai/tiktoken](https://github.com/openai/tiktoken) to calculate the token count. - -## How to use - -### HuggingFace Spaces - -Go to and try it out! - -### Docker - -There is a pre-built Docker image available on Docker Hub: - -```bash -docker run -p 7860:7860 jacoblincool/tiktoken-calculator -``` - -If you prefer to use Docker Compose, you can clone this repository and run: - -```bash -docker compose up -d -``` - -### Local - -I use Poetry to manage dependencies. - -Setup the virtual environment after cloning this repository: - -```bash -poetry install -``` - -Then run the app: - -```bash -poetry run python app.py -``` diff --git a/spaces/Jayeshbhaal/news_filter_for_social_wellbeing/README.md b/spaces/Jayeshbhaal/news_filter_for_social_wellbeing/README.md deleted file mode 100644 index 05727a770c1115e6597a83784324c2d2adc1b5a0..0000000000000000000000000000000000000000 --- a/spaces/Jayeshbhaal/news_filter_for_social_wellbeing/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: News Filter For Social Wellbeing -emoji: ⚡ -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.8.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Jeff2323/ai-comic-factory/src/app/interface/panel/index.tsx b/spaces/Jeff2323/ai-comic-factory/src/app/interface/panel/index.tsx deleted file mode 100644 index 4199bf808494b7e45c88fd438ba001a138e3cd17..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/app/interface/panel/index.tsx +++ /dev/null @@ -1,303 +0,0 @@ -"use client" - -import { useEffect, useRef, useState, useTransition } from "react" -// import AutoSizer from "react-virtualized-auto-sizer" - -import { RenderedScene } from "@/types" - -import { getRender, newRender } from "@/app/engine/render" -import { useStore } from "@/app/store" - -import { cn } from "@/lib/utils" -import { getInitialRenderedScene } from "@/lib/getInitialRenderedScene" -import { Progress } from "@/app/interface/progress" - -// import { see } from "@/app/engine/caption" -// import { replaceTextInSpeechBubbles } from "@/lib/replaceTextInSpeechBubbles" - -export function Panel({ - panel, - className = "", - width = 1, - height = 1, -}: { - panel: number - className?: string - width?: number - height?: number - }) { - const panelId = `${panel}` - - const ref = useRef(null) - const font = useStore(state => state.font) - const preset = useStore(state => state.preset) - - const setGeneratingImages = useStore(state => state.setGeneratingImages) - - const [imageWithText, setImageWithText] = useState("") - const panels = useStore(state => state.panels) - const prompt = panels[panel] || "" - - const captions = useStore(state => state.captions) - const caption = captions[panel] || "" - - const zoomLevel = useStore(state => state.zoomLevel) - const showCaptions = useStore(state => state.showCaptions) - - const addToUpscaleQueue = useStore(state => state.addToUpscaleQueue) - - const [_isPending, startTransition] = useTransition() - const renderedScenes = useStore(state => state.renderedScenes) - const setRendered = useStore(state => state.setRendered) - - const rendered = renderedScenes[panel] || getInitialRenderedScene() - - // keep a ref in sync - const renderedRef = useRef() - const renderedKey = JSON.stringify(rendered) - useEffect(() => { renderedRef.current = rendered }, [renderedKey]) - - const timeoutRef = useRef(null) - - const delay = 3000 + (1000 * panel) - - // 
since this run in its own loop, we need to use references everywhere - // but perhaps this could be refactored - useEffect(() => { - // console.log("Panel prompt: "+ prompt) - if (!prompt?.length) { return } - - // important: update the status, and clear the scene - setGeneratingImages(panelId, true) - - // just to empty it - setRendered(panelId, getInitialRenderedScene()) - - setTimeout(() => { - startTransition(async () => { - - // console.log(`Loading panel ${panel}..`) - - let newRendered: RenderedScene - try { - newRendered = await newRender({ prompt, width, height }) - } catch (err) { - // "Failed to load the panel! Don't worry, we are retrying..") - newRendered = await newRender({ prompt, width, height }) - } - - if (newRendered) { - // console.log("newRendered:", newRendered) - setRendered(panelId, newRendered) - - // but we are still loading! - } else { - setRendered(panelId, { - renderId: "", - status: "pending", - assetUrl: "", - alt: "", - maskUrl: "", - error: "", - segments: [] - }) - setGeneratingImages(panelId, false) - return - } - }) - }, 2000 * panel) - }, [prompt, width, height]) - - - const checkStatus = () => { - startTransition(async () => { - clearTimeout(timeoutRef.current) - - if (!renderedRef.current?.renderId || renderedRef.current?.status !== "pending") { - timeoutRef.current = setTimeout(checkStatus, delay) - return - } - try { - setGeneratingImages(panelId, true) - // console.log(`Checking job status API for job ${renderedRef.current?.renderId}`) - const newRendered = await getRender(renderedRef.current.renderId) - // console.log("got a response!", newRendered) - - if (JSON.stringify(renderedRef.current) !== JSON.stringify(newRendered)) { - // console.log("updated panel:", newRendered) - setRendered(panelId, renderedRef.current = newRendered) - setGeneratingImages(panelId, true) - } - // console.log("status:", newRendered.status) - - if (newRendered.status === "pending") { - // console.log("job not finished") - timeoutRef.current = setTimeout(checkStatus, delay) - } else if (newRendered.status === "error" || - (newRendered.status === "completed" && !newRendered.assetUrl?.length)) { - // console.log(`panel got an error and/or an empty asset url :/ "${newRendered.error}", but let's try to recover..`) - try { - const newAttempt = await newRender({ prompt, width, height }) - setRendered(panelId, newAttempt) - } catch (err) { - console.error("yeah sorry, something is wrong.. 
aborting", err) - setGeneratingImages(panelId, false) - } - } else { - console.log("panel finished!") - setGeneratingImages(panelId, false) - addToUpscaleQueue(panelId, newRendered) - } - } catch (err) { - console.error(err) - timeoutRef.current = setTimeout(checkStatus, delay) - } - }) - } - - useEffect(() => { - // console.log("starting timeout") - clearTimeout(timeoutRef.current) - - // normally it should reply in < 1sec, but we could also use an interval - timeoutRef.current = setTimeout(checkStatus, delay) - - return () => { - clearTimeout(timeoutRef.current) - } - }, [prompt, width, height]) - - /* - doing the captionning from the browser is expensive - a simpler solution is to caption directly during SDXL generation - - useEffect(() => { - if (!rendered.assetUrl) { return } - // the asset url can evolve with time (link to a better resolution image) - // however it would be costly to ask for the caption, the low resolution is enough for the semantic resolution - // so we just do nothing if we already have the caption - if (caption) { return } - startTransition(async () => { - try { - const newCaption = await see({ - prompt: "please caption the following image", - imageBase64: rendered.assetUrl - }) - if (newCaption) { - setCaption(newCaption) - } - } catch (err) { - console.error(`failed to generate the caption:`, err) - } - }) - }, [rendered.assetUrl, caption]) - */ - - const frameClassName = cn( - //`flex`, - `w-full h-full`, - `border-stone-800`, - `transition-all duration-200 ease-in-out`, - zoomLevel > 140 ? `border-[2px] md:border-[4px] rounded-sm md:rounded-md` : - zoomLevel > 120 ? `border-[1.5px] md:border-[3px] rounded-xs md:rounded-sm` : - zoomLevel > 90 ? `border-[1px] md:border-[2px] rounded-xs md:rounded-sm` : - zoomLevel > 40 ? `border-[0.5px] md:border-[1px] rounded-none md:rounded-xs` : - `border-transparent md:border-[0.5px] rounded-none md:rounded-none`, - `shadow-sm`, - `overflow-hidden`, - `print:border-[1.5px] print:shadow-none`, - ) - - - /* - text detection (doesn't work) - useEffect(() => { - const fn = async () => { - if (!rendered.assetUrl || !ref.current) { - return - } - - const result = await replaceTextInSpeechBubbles( - rendered.assetUrl, - "Lorem ipsum dolor sit amet, dolor ipsum. Sit amet? Ipsum! Dolor!!!" - ) - if (result) { - setImageWithText(result) - } - } - fn() - - }, [rendered.assetUrl, ref.current]) - */ - - if (prompt && !rendered.assetUrl) { - return ( -
- -
- ) - } - - return ( -
-
140 ? `border-b-[2px] md:border-b-[4px]` : - zoomLevel > 120 ? `border-b-[1.5px] md:border-b-[3px]` : - zoomLevel > 90 ? `border-b-[1px] md:border-b-[2px]` : - zoomLevel > 40 ? `border-b-[0.5px] md:border-b-[1px]` : - `border-transparent md:border-b-[0.5px]`, - `print:border-b-[1.5px]`, - `truncate`, - - zoomLevel > 200 ? `p-4 md:p-8` : - zoomLevel > 180 ? `p-[14px] md:p-8` : - zoomLevel > 160 ? `p-[12px] md:p-[28px]` : - zoomLevel > 140 ? `p-[10px] md:p-[26px]` : - zoomLevel > 120 ? `p-2 md:p-6` : - zoomLevel > 100 ? `p-1.5 md:p-[20px]` : - zoomLevel > 90 ? `p-1.5 md:p-4` : - zoomLevel > 40 ? `p-1 md:p-2` : - `p-0.5 md:p-2`, - - zoomLevel > 220 ? `text-xl md:text-4xl` : - zoomLevel > 200 ? `text-lg md:text-3xl` : - zoomLevel > 180 ? `text-md md:text-2xl` : - zoomLevel > 140 ? `text-2xs md:text-2xl` : - zoomLevel > 120 ? `text-3xs md:text-xl` : - zoomLevel > 100 ? `text-4xs md:text-lg` : - zoomLevel > 90 ? `text-5xs md:text-sm` : - zoomLevel > 40 ? `md:text-xs` : `md:text-2xs`, - - showCaptions ? ( - zoomLevel > 90 ? `block` : `hidden md:block` - ) : `hidden`, - )} - >{caption || ""} -
- {rendered.assetUrl && - {rendered.alt}} -
- ) -} \ No newline at end of file diff --git a/spaces/JeffJing/ZookChatBot/revChatGPT/V2.py b/spaces/JeffJing/ZookChatBot/revChatGPT/V2.py deleted file mode 100644 index 4a1498748c5e022fefb16bf8794aecaf5c922e15..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/revChatGPT/V2.py +++ /dev/null @@ -1,372 +0,0 @@ -""" -Official API for ChatGPT -""" -import asyncio -import json -import os -import sys - -import httpx -import requests -import tiktoken -from OpenAIAuth.OpenAIAuth import OpenAIAuth - -ENCODER = tiktoken.get_encoding("gpt2") - - -def get_max_tokens(prompt: str) -> int: - """ - Get the max tokens for a prompt - """ - return 4000 - len(ENCODER.encode(prompt)) - - -class Message: - """ - A single exchange between the user and the bot - """ - - def __init__(self, text: str, author: str) -> None: - self.text: str = text - self.author: str = author - - -class Conversation: - """ - A single conversation - """ - - def __init__(self) -> None: - self.messages: list[Message] = [] - - -CONVERSATION_BUFFER: int = int(os.environ.get("CONVERSATION_BUFFER") or 1500) - - -class Conversations: - """ - Conversation handler - """ - - def __init__(self) -> None: - self.conversations: dict[str][Conversation] = {} - - def add_message(self, message: Message, conversation_id: str) -> None: - """ - Adds a message to a conversation - """ - if conversation_id not in self.conversations: - self.conversations[conversation_id] = Conversation() - self.conversations[conversation_id].messages.append(message) - - def get(self, conversation_id: str) -> str: - """ - Builds a conversation string from a conversation id - """ - if conversation_id not in self.conversations: - return "" - # Build conversation string from messages and check if it's too long - conversation = "" - for message in self.conversations[conversation_id].messages: - conversation += f"{message.author}: {message.text}<|im_sep|>\n\n" - if len(ENCODER.encode(conversation)) > 4000 - CONVERSATION_BUFFER: - self.purge_history(conversation_id) - return self.get(conversation_id) - return conversation - - def purge_history(self, conversation_id: str, num: int = 1): - """ - Remove oldest messages from a conversation - """ - if conversation_id not in self.conversations: - return - self.conversations[conversation_id].messages = self.conversations[ - conversation_id - ].messages[num:] - - def rollback(self, conversation_id: str, num: int = 1): - """ - Remove latest messages from a conversation - """ - if conversation_id not in self.conversations: - return - self.conversations[conversation_id].messages = self.conversations[ - conversation_id - ].messages[:-num] - - def remove(self, conversation_id: str) -> None: - """ - Removes a conversation - """ - if conversation_id in self.conversations: - del self.conversations[conversation_id] - - -BASE_PROMPT = ( - os.environ.get("BASE_PROMPT") - or """You are ChatGPT, a large language model by OpenAI. 
Respond conversationally\n\n\n""" -) - -PROXY_URL = os.environ.get("PROXY_URL") or "https://chat.duti.tech" - - -class Chatbot: - """ - Handles everything seamlessly - """ - - def __init__( - self, - email: str, - password: str, - paid: bool = False, - proxy=None, - insecure: bool = False, - session_token: str = None, - ) -> None: - self.proxy = proxy - self.email: str = email - self.password: str = password - self.session_token = session_token - self.insecure: bool = insecure - self.api_key: str - self.paid: bool = paid - self.conversations = Conversations() - self.login(email, password, proxy, insecure, session_token) - - async def ask(self, prompt: str, conversation_id: str = None) -> dict: - """ - Gets a response from the API - """ - if conversation_id is None: - conversation_id = "default" - self.conversations.add_message( - Message(prompt, "User"), - conversation_id=conversation_id, - ) - conversation: str = self.conversations.get(conversation_id) - # Build request body - body = self.__get_config() - body["prompt"] = BASE_PROMPT + conversation + "ChatGPT: " - body["max_tokens"] = get_max_tokens(conversation) - async with httpx.AsyncClient(proxies=self.proxy if self.proxy else None).stream( - method="POST", - url=PROXY_URL + "/completions", - data=json.dumps(body), - headers={"Authorization": f"Bearer {self.api_key}"}, - timeout=1080, - ) as response: - full_result = "" - async for line in response.aiter_lines(): - if response.status_code == 429: - print("error: " + "Too many requests") - raise Exception("Too many requests") - elif response.status_code == 523: - print( - "error: " - + "Origin is unreachable. Ensure that you are authenticated and are using the correct pricing model.", - ) - raise Exception( - "Origin is unreachable. Ensure that you are authenticated and are using the correct pricing model.", - ) - elif response.status_code == 503: - print("error: " + "OpenAI error!") - raise Exception("OpenAI error!") - elif response.status_code != 200: - print("error: " + "Unknown error") - raise Exception("Unknown error") - line = line.strip() - if line == "\n" or line == "": - continue - if line == "data: [DONE]": - break - try: - # Remove "data: " from the start of the line - data = json.loads(line[6:]) - if data is None: - continue - full_result += data["choices"][0]["text"].replace("<|im_end|>", "") - if "choices" not in data: - continue - yield data - except json.JSONDecodeError: - continue - self.conversations.add_message( - Message(full_result, "ChatGPT"), - conversation_id=conversation_id, - ) - - def __get_config(self) -> dict: - return { - "temperature": float(os.environ.get("TEMPERATURE") or 0.5), - "top_p": float(os.environ.get("TOP_P") or 1), - "stop": ["<|im_end|>", "<|im_sep|>"], - "presence_penalty": float(os.environ.get("PRESENCE_PENALTY") or 1.0), - "paid": self.paid, - "stream": True, - } - - def login(self, email, password, proxy, insecure, session_token) -> None: - """ - Login to the API - """ - if not insecure: - auth = OpenAIAuth(email_address=email, password=password, proxy=proxy) - if session_token: - auth.session_token = session_token - auth.get_access_token() - self.api_key = auth.access_token - if self.api_key is None: - self.session_token = None - self.login(email, password, proxy, insecure, None) - return - auth.begin() - self.session_token = auth.session_token - self.api_key = auth.access_token - else: - auth_request = requests.post( - PROXY_URL + "/auth", - json={"email": email, "password": password}, - timeout=10, - ) - self.api_key = 
auth_request.json()["accessToken"] - - -def get_input(prompt): - """ - Multi-line input - """ - # Display the prompt - print(prompt, end="") - - # Initialize an empty list to store the input lines - lines = [] - - # Read lines of input until the user enters an empty line - while True: - line = input() - if line == "": - break - lines.append(line) - - # Join the lines, separated by newlines, and store the result - user_input = "\n".join(lines) - - # Return the input - return user_input - - -async def main(): - """ - Testing main function - """ - import argparse - - print( - """ - ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat) - Repo: github.com/acheong08/ChatGPT - """, - ) - parser = argparse.ArgumentParser() - parser.add_argument( - "-e", - "--email", - help="Your OpenAI email address", - required=False, - ) - parser.add_argument( - "-p", - "--password", - help="Your OpenAI password", - required=False, - ) - parser.add_argument( - "--paid", - help="Use the paid API", - action="store_true", - ) - parser.add_argument( - "--proxy", - help="Use a proxy", - required=False, - type=str, - default=None, - ) - parser.add_argument( - "--insecure-auth", - help="Use an insecure authentication method to bypass OpenAI's geo-blocking", - action="store_true", - ) - parser.add_argument( - "--session_token", - help="Alternative to email and password authentication. Use this if you have Google/Microsoft account.", - required=False, - ) - args = parser.parse_args() - - if (args.email is None or args.password is None) and args.session_token is None: - print("error: " + "Please provide your email and password") - return - print("Logging in...") - chatbot = Chatbot( - args.email, - args.password, - paid=args.paid, - proxy=args.proxy, - insecure=args.insecure_auth, - session_token=args.session_token, - ) - print("Logged in\n") - - print("Type '!help' to show a full list of commands") - print("Press enter twice to submit your question.\n") - - def commands(command: str) -> bool: - if command == "!help": - print( - """ - !help - Show this help message - !reset - Clear the current conversation - !rollback - Remove the latest messages from the conversation - !exit - Exit the program - """, - ) - elif command == "!reset": - chatbot.conversations.remove("default") - print("Conversation cleared") - elif command.startswith("!rollback"): - try: - num = int(command.split(" ")[1]) - chatbot.conversations.rollback("default", num) - print(f"Removed {num} messages from the conversation") - except IndexError: - print("Please specify the number of messages to remove") - except ValueError: - print("Please specify a valid number of messages to remove") - elif command == "!exit": - print("Exiting...") - sys.exit(0) - else: - return False - return True - - try: - while True: - prompt = get_input("\nYou:\n") - if prompt.startswith("!"): - if commands(prompt): - continue - print("ChatGPT:") - async for line in chatbot.ask(prompt=prompt): - result = line["choices"][0]["text"].replace("<|im_end|>", "") - print(result, end="") - sys.stdout.flush() - print() - except KeyboardInterrupt: - print("Exiting...") - sys.exit(0) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/spaces/JeffJing/ZookChatBot/steamship/utils/signed_urls.py b/spaces/JeffJing/ZookChatBot/steamship/utils/signed_urls.py deleted file mode 100644 index 621b23ceecf7e3af450d0b8a129a8627662df263..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/utils/signed_urls.py +++ /dev/null @@ 
-1,100 +0,0 @@ -import json -import logging -from pathlib import Path -from typing import Optional - -import requests - -from steamship import SteamshipError -from steamship.utils.url import apply_localstack_url_fix - - -def url_to_json(url: str) -> any: - """ - Downloads the Signed URL and returns the contents as JSON. - """ - bytes = url_to_bytes(url) - json_string = bytes.decode("utf8") - return json.loads(json_string) - - -def url_to_bytes(url: str) -> bytes: - """ - Downloads the Signed URL and returns the contents as bytes. - - This is a helper function to consolidate Steamship Client URL fetching to ensure a single point of handling for: - * Error messages - * Any required manipulations for URL signed URLs - * Any required manipulations for localstack-based environments - - Note that the base API Client does not use this method on purpose: in the event of error code, it inspects the - contents of the response for a SteamshipError. - """ - url = apply_localstack_url_fix(url) - logging.info(f"Downloading: {url}.") - - resp = requests.get(url) - if resp.status_code != 200: - # TODO: At least Localstack send to reply with HTTP 200 even if the file isn't found! - # The full response contains: - # - # NoSuchKey - # - # So we **could** check the response text even in the event of 200 but that seems wrong.. - if "NoSuchKey" in resp.text: - raise SteamshipError( - message=f"The file at signed URL {url} did not exist. HTTP {resp.status_code}. Content: {resp.text}" - ) - else: - raise SteamshipError( - message=f"There was an error downloading from the signed url: {url}. HTTP {resp.status_code}. Content: {resp.text}" - ) - return resp.content - - -def download_from_signed_url(url: str, to_file: Path = None) -> Path: - """ - Downloads the Signed URL to the filename `desired_filename` in a temporary directory on disk. - """ - content = url_to_bytes(url) - - if not to_file.parent.exists(): - to_file.parent.mkdir(parents=True, exist_ok=True) - - with open(to_file, "wb") as f: - logging.debug(f"Got contents of: {url}") - f.write(content) - logging.debug(f"Wrote contents of: {url} to {to_file}") - return Path(to_file) - - -def upload_to_signed_url(url: str, _bytes: Optional[bytes] = None, filepath: Optional[Path] = None): - """ - Uploads either the bytes or filepath contents to the provided Signed URL. - """ - - url = apply_localstack_url_fix(url) - if _bytes is not None: - logging.info(f"Uploading provided bytes to: {url}") - elif filepath is not None: - logging.info(f"Uploading file at {filepath} to: {url}") - with open(filepath, "rb") as f: - _bytes = f.read() - else: - raise SteamshipError( - message="Unable to upload data to signed URL -- neither a filepath nor bytes were provided.", - suggestion="Please provide either the `bytes` or the `filepath` argument", - ) - - http_response = requests.put( - url, data=_bytes, headers={"Content-Type": "application/octet-stream"} - ) - - # S3 returns 204 upon success; we include 200 here for safety. - if http_response.status_code not in [200, 204]: - logging.error(f"File upload error. file={filepath}. url= {url}") - logging.error(f"Status Code: {http_response.status_code}") - logging.error(f"Response Text: {http_response.text}") - raise SteamshipError( - message=f"Unable to upload data to signed URL. Status code: {http_response.status_code}. 
Status text: {http_response.text}" - ) diff --git a/spaces/Jumon/whisper-zero-shot-audio-classification/README.md b/spaces/Jumon/whisper-zero-shot-audio-classification/README.md deleted file mode 100644 index b60abe740715a54f21a235208d630b703a863898..0000000000000000000000000000000000000000 --- a/spaces/Jumon/whisper-zero-shot-audio-classification/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Zero-shot Audio Classification using Whisper -emoji: 🌍📢 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KPCGD/bingo/src/components/ui/voice/index.tsx b/spaces/KPCGD/bingo/src/components/ui/voice/index.tsx deleted file mode 100644 index 4adcb632226bfced8b97092782811edf08b56569..0000000000000000000000000000000000000000 --- a/spaces/KPCGD/bingo/src/components/ui/voice/index.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import './index.scss' - -export interface VoiceProps extends CSSPropertyRule { - num?: number; - duration?: number; -} -export default function Voice({ duration = 400, num = 7, ...others }) { - return ( -
- {Array.from({ length: num }).map((_, index) => { - const randomDuration = Math.random() * 100 + duration - const initialDelay = Math.random() * 2 * duration - const initialScale = Math.sin((index + 1) * Math.PI / num) - return ( -
- ) - })} -
- ) -} diff --git a/spaces/Kayson/InstructDiffusion/stable_diffusion/ldm/modules/image_degradation/bsrgan_light.py b/spaces/Kayson/InstructDiffusion/stable_diffusion/ldm/modules/image_degradation/bsrgan_light.py deleted file mode 100644 index 9e1f823996bf559e9b015ea9aa2b3cd38dd13af1..0000000000000000000000000000000000000000 --- a/spaces/Kayson/InstructDiffusion/stable_diffusion/ldm/modules/image_degradation/bsrgan_light.py +++ /dev/null @@ -1,650 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] - - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. 
- Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = 
sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. 
- threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - - wd2 = wd2/4 - wd = wd/4 - - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) - img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. 
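- # vals is the simulated photon count, drawn log-uniformly from [1e2, 1e4]; larger vals means weaker Poisson noise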
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(80, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] 
# nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - # elif i == 1: - # image = add_blur(image, sf=sf) - - if i == 0: - pass - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.8: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] 
# nearest downsampling - - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - # - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - example = {"image": image} - return example - - - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_hq = img - img_lq = deg_fn(img)["image"] - img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), - (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') diff --git a/spaces/Kevin676/AutoGPT/autogpt/logs.py b/spaces/Kevin676/AutoGPT/autogpt/logs.py deleted file mode 100644 index 35037404a98f7be9b7d577b625cc190ca27f4566..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/autogpt/logs.py +++ /dev/null @@ -1,332 +0,0 @@ -"""Logging module for Auto-GPT.""" -import json -import logging -import os -import random -import re -import time -import traceback -from logging import LogRecord - -from colorama import Fore, Style - -from autogpt.config import Config, Singleton -from autogpt.speech import say_text - -CFG = Config() - - -class Logger(metaclass=Singleton): - """ - Logger that handle titles in different colors. 
- Outputs logs in console, activity.log, and errors.log - For console handler: simulates typing - """ - - def __init__(self): - # create log directory if it doesn't exist - this_files_dir_path = os.path.dirname(__file__) - log_dir = os.path.join(this_files_dir_path, "../logs") - if not os.path.exists(log_dir): - os.makedirs(log_dir) - - log_file = "activity.log" - error_file = "error.log" - - console_formatter = AutoGptFormatter("%(title_color)s %(message)s") - - # Create a handler for console which simulate typing - self.typing_console_handler = TypingConsoleHandler() - self.typing_console_handler.setLevel(logging.INFO) - self.typing_console_handler.setFormatter(console_formatter) - - # Create a handler for console without typing simulation - self.console_handler = ConsoleHandler() - self.console_handler.setLevel(logging.DEBUG) - self.console_handler.setFormatter(console_formatter) - - # Info handler in activity.log - self.file_handler = logging.FileHandler( - os.path.join(log_dir, log_file), "a", "utf-8" - ) - self.file_handler.setLevel(logging.DEBUG) - info_formatter = AutoGptFormatter( - "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" - ) - self.file_handler.setFormatter(info_formatter) - - # Error handler error.log - error_handler = logging.FileHandler( - os.path.join(log_dir, error_file), "a", "utf-8" - ) - error_handler.setLevel(logging.ERROR) - error_formatter = AutoGptFormatter( - "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" - " %(message_no_color)s" - ) - error_handler.setFormatter(error_formatter) - - self.typing_logger = logging.getLogger("TYPER") - self.typing_logger.addHandler(self.typing_console_handler) - self.typing_logger.addHandler(self.file_handler) - self.typing_logger.addHandler(error_handler) - self.typing_logger.setLevel(logging.DEBUG) - - self.logger = logging.getLogger("LOGGER") - self.logger.addHandler(self.console_handler) - self.logger.addHandler(self.file_handler) - self.logger.addHandler(error_handler) - self.logger.setLevel(logging.DEBUG) - - def typewriter_log( - self, title="", title_color="", content="", speak_text=False, level=logging.INFO - ): - if speak_text and CFG.speak_mode: - say_text(f"{title}. {content}") - - if content: - if isinstance(content, list): - content = " ".join(content) - else: - content = "" - - self.typing_logger.log( - level, content, extra={"title": title, "color": title_color} - ) - - def debug( - self, - message, - title="", - title_color="", - ): - self._log(title, title_color, message, logging.DEBUG) - - def warn( - self, - message, - title="", - title_color="", - ): - self._log(title, title_color, message, logging.WARN) - - def error(self, title, message=""): - self._log(title, Fore.RED, message, logging.ERROR) - - def _log(self, title="", title_color="", message="", level=logging.INFO): - if message: - if isinstance(message, list): - message = " ".join(message) - self.logger.log(level, message, extra={"title": title, "color": title_color}) - - def set_level(self, level): - self.logger.setLevel(level) - self.typing_logger.setLevel(level) - - def double_check(self, additionalText=None): - if not additionalText: - additionalText = ( - "Please ensure you've setup and configured everything" - " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to " - "double check. You can also create a github issue or join the discord" - " and ask there!" 
- ) - - self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText) - - -""" -Output stream to console using simulated typing -""" - - -class TypingConsoleHandler(logging.StreamHandler): - def emit(self, record): - min_typing_speed = 0.05 - max_typing_speed = 0.01 - - msg = self.format(record) - try: - words = msg.split() - for i, word in enumerate(words): - print(word, end="", flush=True) - if i < len(words) - 1: - print(" ", end="", flush=True) - typing_speed = random.uniform(min_typing_speed, max_typing_speed) - time.sleep(typing_speed) - # type faster after each word - min_typing_speed = min_typing_speed * 0.95 - max_typing_speed = max_typing_speed * 0.95 - print() - except Exception: - self.handleError(record) - - -class ConsoleHandler(logging.StreamHandler): - def emit(self, record) -> None: - msg = self.format(record) - try: - print(msg) - except Exception: - self.handleError(record) - - -class AutoGptFormatter(logging.Formatter): - """ - Allows to handle custom placeholders 'title_color' and 'message_no_color'. - To use this formatter, make sure to pass 'color', 'title' as log extras. - """ - - def format(self, record: LogRecord) -> str: - if hasattr(record, "color"): - record.title_color = ( - getattr(record, "color") - + getattr(record, "title") - + " " - + Style.RESET_ALL - ) - else: - record.title_color = getattr(record, "title") - if hasattr(record, "msg"): - record.message_no_color = remove_color_codes(getattr(record, "msg")) - else: - record.message_no_color = "" - return super().format(record) - - -def remove_color_codes(s: str) -> str: - ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - return ansi_escape.sub("", s) - - -logger = Logger() - - -def print_assistant_thoughts(ai_name, assistant_reply): - """Prints the assistant's thoughts to the console""" - from autogpt.json_utils.json_fix_llm import ( - attempt_to_fix_json_by_finding_outermost_brackets, - fix_and_parse_json, - ) - - try: - try: - # Parse and print Assistant response - assistant_reply_json = fix_and_parse_json(assistant_reply) - except json.JSONDecodeError: - logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply) - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply - ) - if isinstance(assistant_reply_json, str): - assistant_reply_json = fix_and_parse_json(assistant_reply_json) - - # Check if assistant_reply_json is a string and attempt to parse - # it into a JSON object - if isinstance(assistant_reply_json, str): - try: - assistant_reply_json = json.loads(assistant_reply_json) - except json.JSONDecodeError: - logger.error("Error: Invalid JSON\n", assistant_reply) - assistant_reply_json = ( - attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply_json - ) - ) - - assistant_thoughts_reasoning = None - assistant_thoughts_plan = None - assistant_thoughts_speak = None - assistant_thoughts_criticism = None - if not isinstance(assistant_reply_json, dict): - assistant_reply_json = {} - assistant_thoughts = assistant_reply_json.get("thoughts", {}) - assistant_thoughts_text = assistant_thoughts.get("text") - - if assistant_thoughts: - assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") - assistant_thoughts_plan = assistant_thoughts.get("plan") - assistant_thoughts_criticism = assistant_thoughts.get("criticism") - assistant_thoughts_speak = assistant_thoughts.get("speak") - - logger.typewriter_log( - f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}" - ) - 
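- # the remaining thought fields (reasoning, plan, criticism, speak) are each logged on their own labelled line below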
logger.typewriter_log( - "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}" - ) - - if assistant_thoughts_plan: - logger.typewriter_log("PLAN:", Fore.YELLOW, "") - # If it's a list, join it into a string - if isinstance(assistant_thoughts_plan, list): - assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) - elif isinstance(assistant_thoughts_plan, dict): - assistant_thoughts_plan = str(assistant_thoughts_plan) - - # Split the input_string using the newline character and dashes - lines = assistant_thoughts_plan.split("\n") - for line in lines: - line = line.lstrip("- ") - logger.typewriter_log("- ", Fore.GREEN, line.strip()) - - logger.typewriter_log( - "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}" - ) - # Speak the assistant's thoughts - if CFG.speak_mode and assistant_thoughts_speak: - say_text(assistant_thoughts_speak) - else: - logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}") - - return assistant_reply_json - except json.decoder.JSONDecodeError: - logger.error("Error: Invalid JSON\n", assistant_reply) - if CFG.speak_mode: - say_text( - "I have received an invalid JSON response from the OpenAI API." - " I cannot ignore this response." - ) - - # All other errors, return "Error: + error message" - except Exception: - call_stack = traceback.format_exc() - logger.error("Error: \n", call_stack) - - -def print_assistant_thoughts( - ai_name: object, assistant_reply_json_valid: object -) -> None: - assistant_thoughts_reasoning = None - assistant_thoughts_plan = None - assistant_thoughts_speak = None - assistant_thoughts_criticism = None - - assistant_thoughts = assistant_reply_json_valid.get("thoughts", {}) - assistant_thoughts_text = assistant_thoughts.get("text") - if assistant_thoughts: - assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") - assistant_thoughts_plan = assistant_thoughts.get("plan") - assistant_thoughts_criticism = assistant_thoughts.get("criticism") - assistant_thoughts_speak = assistant_thoughts.get("speak") - logger.typewriter_log( - f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}" - ) - logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}") - if assistant_thoughts_plan: - logger.typewriter_log("PLAN:", Fore.YELLOW, "") - # If it's a list, join it into a string - if isinstance(assistant_thoughts_plan, list): - assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) - elif isinstance(assistant_thoughts_plan, dict): - assistant_thoughts_plan = str(assistant_thoughts_plan) - - # Split the input_string using the newline character and dashes - lines = assistant_thoughts_plan.split("\n") - for line in lines: - line = line.lstrip("- ") - logger.typewriter_log("- ", Fore.GREEN, line.strip()) - logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}") - # Speak the assistant's thoughts - if CFG.speak_mode and assistant_thoughts_speak: - say_text(assistant_thoughts_speak) diff --git a/spaces/Kevin676/AutoGPT/tests/unit/test_commands.py b/spaces/Kevin676/AutoGPT/tests/unit/test_commands.py deleted file mode 100644 index ecbac9b73bd9ad872931d77e144dd853b3d8ef64..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/tests/unit/test_commands.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Unit tests for the commands module""" -from unittest.mock import MagicMock, patch - -import pytest - -import autogpt.agent.agent_manager as agent_manager -from autogpt.app import execute_command, list_agents, start_agent - - 
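- # openai.ChatCompletion.create is patched below, so start_agent runs without making real API calls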
-@pytest.mark.integration_test -def test_make_agent() -> None: - """Test the make_agent command""" - with patch("openai.ChatCompletion.create") as mock: - obj = MagicMock() - obj.response.choices[0].messages[0].content = "Test message" - mock.return_value = obj - start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2") - agents = list_agents() - assert "List of agents:\n0: chat" == agents - start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2") - agents = list_agents() - assert "List of agents:\n0: chat\n1: write" == agents diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg_extractor/utterance_mvn.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg_extractor/utterance_mvn.py deleted file mode 100644 index 37fb0c1b918bff60d0c6b5fef883b2f735e7cd79..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg_extractor/utterance_mvn.py +++ /dev/null @@ -1,82 +0,0 @@ -from typing import Tuple - -import torch - -from .nets_utils import make_pad_mask - - -class UtteranceMVN(torch.nn.Module): - def __init__( - self, norm_means: bool = True, norm_vars: bool = False, eps: float = 1.0e-20, - ): - super().__init__() - self.norm_means = norm_means - self.norm_vars = norm_vars - self.eps = eps - - def extra_repr(self): - return f"norm_means={self.norm_means}, norm_vars={self.norm_vars}" - - def forward( - self, x: torch.Tensor, ilens: torch.Tensor = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - """Forward function - - Args: - x: (B, L, ...) - ilens: (B,) - - """ - return utterance_mvn( - x, - ilens, - norm_means=self.norm_means, - norm_vars=self.norm_vars, - eps=self.eps, - ) - - -def utterance_mvn( - x: torch.Tensor, - ilens: torch.Tensor = None, - norm_means: bool = True, - norm_vars: bool = False, - eps: float = 1.0e-20, -) -> Tuple[torch.Tensor, torch.Tensor]: - """Apply utterance mean and variance normalization - - Args: - x: (B, T, D), assumed zero padded - ilens: (B,) - norm_means: - norm_vars: - eps: - - """ - if ilens is None: - ilens = x.new_full([x.size(0)], x.size(1)) - ilens_ = ilens.to(x.device, x.dtype).view(-1, *[1 for _ in range(x.dim() - 1)]) - # Zero padding - if x.requires_grad: - x = x.masked_fill(make_pad_mask(ilens, x, 1), 0.0) - else: - x.masked_fill_(make_pad_mask(ilens, x, 1), 0.0) - # mean: (B, 1, D) - mean = x.sum(dim=1, keepdim=True) / ilens_ - - if norm_means: - x -= mean - - if norm_vars: - var = x.pow(2).sum(dim=1, keepdim=True) / ilens_ - std = torch.clamp(var.sqrt(), min=eps) - x = x / std.sqrt() - return x, ilens - else: - if norm_vars: - y = x - mean - y.masked_fill_(make_pad_mask(ilens, y, 1), 0.0) - var = y.pow(2).sum(dim=1, keepdim=True) / ilens_ - std = torch.clamp(var.sqrt(), min=eps) - x /= std - return x, ilens diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/data_objects/speaker.py b/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/data_objects/speaker.py deleted file mode 100644 index 494e882fe34fc38dcc793ab8c74a6cc2376bb7b5..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder/data_objects/speaker.py +++ /dev/null @@ -1,40 +0,0 @@ -from encoder.data_objects.random_cycler import RandomCycler -from encoder.data_objects.utterance import Utterance -from pathlib import Path - -# Contains the set of utterances of a single speaker -class Speaker: - def __init__(self, root: Path): - self.root = root - self.name = root.name - self.utterances = None - self.utterance_cycler = None - - def _load_utterances(self): - with 
self.root.joinpath("_sources.txt").open("r") as sources_file: - sources = [l.split(",") for l in sources_file] - sources = {frames_fname: wave_fpath for frames_fname, wave_fpath in sources} - self.utterances = [Utterance(self.root.joinpath(f), w) for f, w in sources.items()] - self.utterance_cycler = RandomCycler(self.utterances) - - def random_partial(self, count, n_frames): - """ - Samples a batch of unique partial utterances from the disk in a way that all - utterances come up at least once every two cycles and in a random order every time. - - :param count: The number of partial utterances to sample from the set of utterances from - that speaker. Utterances are guaranteed not to be repeated if is not larger than - the number of utterances available. - :param n_frames: The number of frames in the partial utterance. - :return: A list of tuples (utterance, frames, range) where utterance is an Utterance, - frames are the frames of the partial utterances and range is the range of the partial - utterance with regard to the complete utterance. - """ - if self.utterances is None: - self._load_utterances() - - utterances = self.utterance_cycler.sample(count) - - a = [(u,) + u.random_partial(n_frames) for u in utterances] - - return a diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder_train.py b/spaces/Kevin676/Real-Time-Voice-Cloning/encoder_train.py deleted file mode 100644 index b8740a894d615aadfe529cb36068fc8e3496125f..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Real-Time-Voice-Cloning/encoder_train.py +++ /dev/null @@ -1,47 +0,0 @@ -from utils.argutils import print_args -from encoder.train import train -from pathlib import Path -import argparse - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Trains the speaker encoder. You must have run encoder_preprocess.py first.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - parser.add_argument("run_id", type=str, help= \ - "Name for this model instance. If a model state from the same run ID was previously " - "saved, the training will restart from there. Pass -f to overwrite saved states and " - "restart from scratch.") - parser.add_argument("clean_data_root", type=Path, help= \ - "Path to the output directory of encoder_preprocess.py. If you left the default " - "output directory when preprocessing, it should be /SV2TTS/encoder/.") - parser.add_argument("-m", "--models_dir", type=Path, default="encoder/saved_models/", help=\ - "Path to the output directory that will contain the saved model weights, as well as " - "backups of those weights and plots generated during training.") - parser.add_argument("-v", "--vis_every", type=int, default=10, help= \ - "Number of steps between updates of the loss and the plots.") - parser.add_argument("-u", "--umap_every", type=int, default=100, help= \ - "Number of steps between updates of the umap projection. Set to 0 to never update the " - "projections.") - parser.add_argument("-s", "--save_every", type=int, default=500, help= \ - "Number of steps between updates of the model on the disk. Set to 0 to never save the " - "model.") - parser.add_argument("-b", "--backup_every", type=int, default=7500, help= \ - "Number of steps between backups of the model. 
Set to 0 to never make backups of the " - "model.") - parser.add_argument("-f", "--force_restart", action="store_true", help= \ - "Do not load any saved model.") - parser.add_argument("--visdom_server", type=str, default="http://localhost") - parser.add_argument("--no_visdom", action="store_true", help= \ - "Disable visdom.") - args = parser.parse_args() - - # Process the arguments - args.models_dir.mkdir(exist_ok=True) - - # Run the training - print_args(args, parser) - train(**vars(args)) - \ No newline at end of file diff --git a/spaces/KrishnaBakshi1/YoutubeVideoSummarizer/app.py b/spaces/KrishnaBakshi1/YoutubeVideoSummarizer/app.py deleted file mode 100644 index 3a4ccaa211bfeb33728b35404c9edb2bca469643..0000000000000000000000000000000000000000 --- a/spaces/KrishnaBakshi1/YoutubeVideoSummarizer/app.py +++ /dev/null @@ -1,32 +0,0 @@ -#Import Libraries and Modules -from youtube_transcript_api import YouTubeTranscriptApi -import gradio as gr -from gradio.mix import Series - -#Define a function that extracts video transcripts using youtube video URLs. -def transcript_extracter(link): - video_id = link[link.index("=")+1:] - - transcript = YouTubeTranscriptApi.get_transcript(video_id) - script = "" - - for text in transcript: - t = text["text"] - if t != '[Music]': - script += t + " " - - return script - -generate_transcript_function = gr.Interface(transcript_extracter, 'text', 'text') -summarizer_model = gr.Interface.load("huggingface/sshleifer/distilbart-cnn-12-6") #Load transformers model. - -Demo_application = Series(generate_transcript_function, summarizer_model, - inputs = gr.inputs.Textbox(label = "Enter the YouTube URL: "), - outputs = gr.outputs.Textbox(label = "Youtube Video Summary"), - examples = ["https://www.youtube.com/watch?v=i0h7N1ukZ4A&ab_channel=TED", "https://www.youtube.com/watch?v=dv9q7Ema40k&t=3s&ab_channel=Simplilearn"], - title = "YouTube Video Summarizer", - theme = "grass", - description = "This application uses the sshleifer/distilbart-cnn-12-6 model to summarize Youtube videos. Click on one of the examples or Enter a video URL of your choice! Example 1 - The brain-changing benefits of exercise | TED , Example 2 - Future of AI | Simplilearn", - allow_flagging=False) - -Demo_application.launch() \ No newline at end of file diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/losses/accuracy.py b/spaces/KyanChen/RSPrompter/mmdet/models/losses/accuracy.py deleted file mode 100644 index d68484e13965ced3bd6b104071d22657a9b3fde6..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/losses/accuracy.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn - - -def accuracy(pred, target, topk=1, thresh=None): - """Calculate accuracy according to the prediction and target. - - Args: - pred (torch.Tensor): The model prediction, shape (N, num_class) - target (torch.Tensor): The target of each prediction, shape (N, ) - topk (int | tuple[int], optional): If the predictions in ``topk`` - matches the target, the predictions will be regarded as - correct ones. Defaults to 1. - thresh (float, optional): If not None, predictions with scores under - this threshold are considered incorrect. Default to None. - - Returns: - float | tuple[float]: If the input ``topk`` is a single integer, - the function will return a single float as accuracy. If - ``topk`` is a tuple containing multiple integers, the - function will return a tuple containing accuracies of - each ``topk`` number. 
- """ - assert isinstance(topk, (int, tuple)) - if isinstance(topk, int): - topk = (topk, ) - return_single = True - else: - return_single = False - - maxk = max(topk) - if pred.size(0) == 0: - accu = [pred.new_tensor(0.) for i in range(len(topk))] - return accu[0] if return_single else accu - assert pred.ndim == 2 and target.ndim == 1 - assert pred.size(0) == target.size(0) - assert maxk <= pred.size(1), \ - f'maxk {maxk} exceeds pred dimension {pred.size(1)}' - pred_value, pred_label = pred.topk(maxk, dim=1) - pred_label = pred_label.t() # transpose to shape (maxk, N) - correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) - if thresh is not None: - # Only prediction values larger than thresh are counted as correct - correct = correct & (pred_value > thresh).t() - res = [] - for k in topk: - correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / pred.size(0))) - return res[0] if return_single else res - - -class Accuracy(nn.Module): - - def __init__(self, topk=(1, ), thresh=None): - """Module to calculate the accuracy. - - Args: - topk (tuple, optional): The criterion used to calculate the - accuracy. Defaults to (1,). - thresh (float, optional): If not None, predictions with scores - under this threshold are considered incorrect. Default to None. - """ - super().__init__() - self.topk = topk - self.thresh = thresh - - def forward(self, pred, target): - """Forward function to calculate accuracy. - - Args: - pred (torch.Tensor): Prediction of models. - target (torch.Tensor): Target for each prediction. - - Returns: - tuple[float]: The accuracies under different topk criterions. - """ - return accuracy(pred, target, self.topk, self.thresh) diff --git a/spaces/LanguageBind/LanguageBind/v_cls/video_transforms.py b/spaces/LanguageBind/LanguageBind/v_cls/video_transforms.py deleted file mode 100644 index ebc045e39f214315f5d754c7c7aaeb7524f06b4d..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/v_cls/video_transforms.py +++ /dev/null @@ -1,1267 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. - -import math -import numbers -import random - -import numpy as np -import PIL -import torch -import torchvision -import torchvision.transforms.functional as F -from PIL import Image -from torchvision import transforms - -from . import functional as FF -from .rand_augment import rand_augment_transform -from .random_erasing import RandomErasing - -_pil_interpolation_to_str = { - Image.NEAREST: "PIL.Image.NEAREST", - Image.BILINEAR: "PIL.Image.BILINEAR", - Image.BICUBIC: "PIL.Image.BICUBIC", - Image.LANCZOS: "PIL.Image.LANCZOS", - Image.HAMMING: "PIL.Image.HAMMING", - Image.BOX: "PIL.Image.BOX", -} - -_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) - - -def _pil_interp(method): - if method == "bicubic": - return Image.BICUBIC - elif method == "lanczos": - return Image.LANCZOS - elif method == "hamming": - return Image.HAMMING - else: - return Image.BILINEAR - - -def random_short_side_scale_jitter(images, - min_size, - max_size, - boxes=None, - inverse_uniform_sampling=False): - """ - Perform a spatial short scale jittering on the given images and - corresponding boxes. - Args: - images (tensor): images to perform scale jitter. Dimension is - `num frames` x `channel` x `height` x `width`. - min_size (int): the minimal size to scale the frames. - max_size (int): the maximal size to scale the frames. - boxes (ndarray): optional. Corresponding boxes to images. 
- Dimension is `num boxes` x 4. - inverse_uniform_sampling (bool): if True, sample uniformly in - [1 / max_scale, 1 / min_scale] and take a reciprocal to get the - scale. If False, take a uniform sample from [min_scale, max_scale]. - Returns: - (tensor): the scaled images with dimension of - `num frames` x `channel` x `new height` x `new width`. - (ndarray or None): the scaled boxes with dimension of - `num boxes` x 4. - """ - if inverse_uniform_sampling: - size = int( - round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))) - else: - size = int(round(np.random.uniform(min_size, max_size))) - - height = images.shape[2] - width = images.shape[3] - if (width <= height and width == size) or (height <= width - and height == size): - return images, boxes - new_width = size - new_height = size - if width < height: - new_height = int(math.floor((float(height) / width) * size)) - if boxes is not None: - boxes = boxes * float(new_height) / height - else: - new_width = int(math.floor((float(width) / height) * size)) - if boxes is not None: - boxes = boxes * float(new_width) / width - - return ( - torch.nn.functional.interpolate( - images, - size=(new_height, new_width), - mode="bilinear", - align_corners=False, - ), - boxes, - ) - - -def crop_boxes(boxes, x_offset, y_offset): - """ - Peform crop on the bounding boxes given the offsets. - Args: - boxes (ndarray or None): bounding boxes to peform crop. The dimension - is `num boxes` x 4. - x_offset (int): cropping offset in the x axis. - y_offset (int): cropping offset in the y axis. - Returns: - cropped_boxes (ndarray or None): the cropped boxes with dimension of - `num boxes` x 4. - """ - cropped_boxes = boxes.copy() - cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset - cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset - - return cropped_boxes - - -def random_crop(images, size, boxes=None): - """ - Perform random spatial crop on the given images and corresponding boxes. - Args: - images (tensor): images to perform random crop. The dimension is - `num frames` x `channel` x `height` x `width`. - size (int): the size of height and width to crop on the image. - boxes (ndarray or None): optional. Corresponding boxes to images. - Dimension is `num boxes` x 4. - Returns: - cropped (tensor): cropped images with dimension of - `num frames` x `channel` x `size` x `size`. - cropped_boxes (ndarray or None): the cropped boxes with dimension of - `num boxes` x 4. - """ - if images.shape[2] == size and images.shape[3] == size: - return images - height = images.shape[2] - width = images.shape[3] - y_offset = 0 - if height > size: - y_offset = int(np.random.randint(0, height - size)) - x_offset = 0 - if width > size: - x_offset = int(np.random.randint(0, width - size)) - cropped = images[:, :, y_offset:y_offset + size, x_offset:x_offset + size] - - cropped_boxes = ( - crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None) - - return cropped, cropped_boxes - - -def horizontal_flip(prob, images, boxes=None): - """ - Perform horizontal flip on the given images and corresponding boxes. - Args: - prob (float): probility to flip the images. - images (tensor): images to perform horizontal flip, the dimension is - `num frames` x `channel` x `height` x `width`. - boxes (ndarray or None): optional. Corresponding boxes to images. - Dimension is `num boxes` x 4. - Returns: - images (tensor): images with dimension of - `num frames` x `channel` x `height` x `width`. 
- flipped_boxes (ndarray or None): the flipped boxes with dimension of - `num boxes` x 4. - """ - if boxes is None: - flipped_boxes = None - else: - flipped_boxes = boxes.copy() - - if np.random.uniform() < prob: - images = images.flip((-1)) - - if len(images.shape) == 3: - width = images.shape[2] - elif len(images.shape) == 4: - width = images.shape[3] - else: - raise NotImplementedError("Dimension does not supported") - if boxes is not None: - flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1 - - return images, flipped_boxes - - -def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): - """ - Perform uniform spatial sampling on the images and corresponding boxes. - Args: - images (tensor): images to perform uniform crop. The dimension is - `num frames` x `channel` x `height` x `width`. - size (int): size of height and weight to crop the images. - spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width - is larger than height. Or 0, 1, or 2 for top, center, and bottom - crop if height is larger than width. - boxes (ndarray or None): optional. Corresponding boxes to images. - Dimension is `num boxes` x 4. - scale_size (int): optinal. If not None, resize the images to scale_size before - performing any crop. - Returns: - cropped (tensor): images with dimension of - `num frames` x `channel` x `size` x `size`. - cropped_boxes (ndarray or None): the cropped boxes with dimension of - `num boxes` x 4. - """ - assert spatial_idx in [0, 1, 2] - ndim = len(images.shape) - if ndim == 3: - images = images.unsqueeze(0) - height = images.shape[2] - width = images.shape[3] - - if scale_size is not None: - if width <= height: - width, height = scale_size, int(height / width * scale_size) - else: - width, height = int(width / height * scale_size), scale_size - images = torch.nn.functional.interpolate( - images, - size=(height, width), - mode="bilinear", - align_corners=False, - ) - - y_offset = int(math.ceil((height - size) / 2)) - x_offset = int(math.ceil((width - size) / 2)) - - if height > width: - if spatial_idx == 0: - y_offset = 0 - elif spatial_idx == 2: - y_offset = height - size - else: - if spatial_idx == 0: - x_offset = 0 - elif spatial_idx == 2: - x_offset = width - size - cropped = images[:, :, y_offset:y_offset + size, x_offset:x_offset + size] - cropped_boxes = ( - crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None) - if ndim == 3: - cropped = cropped.squeeze(0) - return cropped, cropped_boxes - - -def clip_boxes_to_image(boxes, height, width): - """ - Clip an array of boxes to an image with the given height and width. - Args: - boxes (ndarray): bounding boxes to perform clipping. - Dimension is `num boxes` x 4. - height (int): given image height. - width (int): given image width. - Returns: - clipped_boxes (ndarray): the clipped boxes with dimension of - `num boxes` x 4. - """ - clipped_boxes = boxes.copy() - clipped_boxes[:, [0, 2]] = np.minimum(width - 1.0, - np.maximum(0.0, boxes[:, [0, 2]])) - clipped_boxes[:, [1, 3]] = np.minimum(height - 1.0, - np.maximum(0.0, boxes[:, [1, 3]])) - return clipped_boxes - - -def blend(images1, images2, alpha): - """ - Blend two images with a given weight alpha. - Args: - images1 (tensor): the first images to be blended, the dimension is - `num frames` x `channel` x `height` x `width`. - images2 (tensor): the second images to be blended, the dimension is - `num frames` x `channel` x `height` x `width`. - alpha (float): the blending weight. 
- Returns: - (tensor): blended images, the dimension is - `num frames` x `channel` x `height` x `width`. - """ - return images1 * alpha + images2 * (1 - alpha) - - -def grayscale(images): - """ - Get the grayscale for the input images. The channels of images should be - in order BGR. - Args: - images (tensor): the input images for getting grayscale. Dimension is - `num frames` x `channel` x `height` x `width`. - Returns: - img_gray (tensor): blended images, the dimension is - `num frames` x `channel` x `height` x `width`. - """ - # R -> 0.299, G -> 0.587, B -> 0.114. - img_gray = torch.tensor(images) - gray_channel = (0.299 * images[:, 2] + 0.587 * images[:, 1] + - 0.114 * images[:, 0]) - img_gray[:, 0] = gray_channel - img_gray[:, 1] = gray_channel - img_gray[:, 2] = gray_channel - return img_gray - - -def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0): - """ - Perfrom a color jittering on the input images. The channels of images - should be in order BGR. - Args: - images (tensor): images to perform color jitter. Dimension is - `num frames` x `channel` x `height` x `width`. - img_brightness (float): jitter ratio for brightness. - img_contrast (float): jitter ratio for contrast. - img_saturation (float): jitter ratio for saturation. - Returns: - images (tensor): the jittered images, the dimension is - `num frames` x `channel` x `height` x `width`. - """ - - jitter = [] - if img_brightness != 0: - jitter.append("brightness") - if img_contrast != 0: - jitter.append("contrast") - if img_saturation != 0: - jitter.append("saturation") - - if len(jitter) > 0: - order = np.random.permutation(np.arange(len(jitter))) - for idx in range(0, len(jitter)): - if jitter[order[idx]] == "brightness": - images = brightness_jitter(img_brightness, images) - elif jitter[order[idx]] == "contrast": - images = contrast_jitter(img_contrast, images) - elif jitter[order[idx]] == "saturation": - images = saturation_jitter(img_saturation, images) - return images - - -def brightness_jitter(var, images): - """ - Perfrom brightness jittering on the input images. The channels of images - should be in order BGR. - Args: - var (float): jitter ratio for brightness. - images (tensor): images to perform color jitter. Dimension is - `num frames` x `channel` x `height` x `width`. - Returns: - images (tensor): the jittered images, the dimension is - `num frames` x `channel` x `height` x `width`. - """ - alpha = 1.0 + np.random.uniform(-var, var) - - img_bright = torch.zeros(images.shape) - images = blend(images, img_bright, alpha) - return images - - -def contrast_jitter(var, images): - """ - Perfrom contrast jittering on the input images. The channels of images - should be in order BGR. - Args: - var (float): jitter ratio for contrast. - images (tensor): images to perform color jitter. Dimension is - `num frames` x `channel` x `height` x `width`. - Returns: - images (tensor): the jittered images, the dimension is - `num frames` x `channel` x `height` x `width`. - """ - alpha = 1.0 + np.random.uniform(-var, var) - - img_gray = grayscale(images) - img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True) - images = blend(images, img_gray, alpha) - return images - - -def saturation_jitter(var, images): - """ - Perfrom saturation jittering on the input images. The channels of images - should be in order BGR. - Args: - var (float): jitter ratio for saturation. - images (tensor): images to perform color jitter. Dimension is - `num frames` x `channel` x `height` x `width`. 
- Returns: - images (tensor): the jittered images, the dimension is - `num frames` x `channel` x `height` x `width`. - """ - alpha = 1.0 + np.random.uniform(-var, var) - img_gray = grayscale(images) - images = blend(images, img_gray, alpha) - - return images - - -def lighting_jitter(images, alphastd, eigval, eigvec): - """ - Perform AlexNet-style PCA jitter on the given images. - Args: - images (tensor): images to perform lighting jitter. Dimension is - `num frames` x `channel` x `height` x `width`. - alphastd (float): jitter ratio for PCA jitter. - eigval (list): eigenvalues for PCA jitter. - eigvec (list[list]): eigenvectors for PCA jitter. - Returns: - out_images (tensor): the jittered images, the dimension is - `num frames` x `channel` x `height` x `width`. - """ - if alphastd == 0: - return images - # generate alpha1, alpha2, alpha3. - alpha = np.random.normal(0, alphastd, size=(1, 3)) - eig_vec = np.array(eigvec) - eig_val = np.reshape(eigval, (1, 3)) - rgb = np.sum( - eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0), - axis=1, - ) - out_images = torch.zeros_like(images) - if len(images.shape) == 3: - # C H W - channel_dim = 0 - elif len(images.shape) == 4: - # T C H W - channel_dim = 1 - else: - raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") - - for idx in range(images.shape[channel_dim]): - # C H W - if len(images.shape) == 3: - out_images[idx] = images[idx] + rgb[2 - idx] - # T C H W - elif len(images.shape) == 4: - out_images[:, idx] = images[:, idx] + rgb[2 - idx] - else: - raise NotImplementedError( - f"Unsupported dimension {len(images.shape)}") - - return out_images - - -def color_normalization(images, mean, stddev): - """ - Perform color nomration on the given images. - Args: - images (tensor): images to perform color normalization. Dimension is - `num frames` x `channel` x `height` x `width`. - mean (list): mean values for normalization. - stddev (list): standard deviations for normalization. - - Returns: - out_images (tensor): the noramlized images, the dimension is - `num frames` x `channel` x `height` x `width`. - """ - if len(images.shape) == 3: - assert ( - len(mean) == images.shape[0]), "channel mean not computed properly" - assert (len(stddev) == images.shape[0] - ), "channel stddev not computed properly" - elif len(images.shape) == 4: - assert ( - len(mean) == images.shape[1]), "channel mean not computed properly" - assert (len(stddev) == images.shape[1] - ), "channel stddev not computed properly" - else: - raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") - - out_images = torch.zeros_like(images) - for idx in range(len(mean)): - # C H W - if len(images.shape) == 3: - out_images[idx] = (images[idx] - mean[idx]) / stddev[idx] - elif len(images.shape) == 4: - out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx] - else: - raise NotImplementedError( - f"Unsupported dimension {len(images.shape)}") - return out_images - - -def _get_param_spatial_crop(scale, - ratio, - height, - width, - num_repeat=10, - log_scale=True, - switch_hw=False): - """ - Given scale, ratio, height and width, return sampled coordinates of the videos. 
- """ - for _ in range(num_repeat): - area = height * width - target_area = random.uniform(*scale) * area - if log_scale: - log_ratio = (math.log(ratio[0]), math.log(ratio[1])) - aspect_ratio = math.exp(random.uniform(*log_ratio)) - else: - aspect_ratio = random.uniform(*ratio) - - w = int(round(math.sqrt(target_area * aspect_ratio))) - h = int(round(math.sqrt(target_area / aspect_ratio))) - - if np.random.uniform() < 0.5 and switch_hw: - w, h = h, w - - if 0 < w <= width and 0 < h <= height: - i = random.randint(0, height - h) - j = random.randint(0, width - w) - return i, j, h, w - - # Fallback to central crop - in_ratio = float(width) / float(height) - if in_ratio < min(ratio): - w = width - h = int(round(w / min(ratio))) - elif in_ratio > max(ratio): - h = height - w = int(round(h * max(ratio))) - else: # whole image - w = width - h = height - i = (height - h) // 2 - j = (width - w) // 2 - return i, j, h, w - - -def random_resized_crop( - images, - target_height, - target_width, - scale=(0.8, 1.0), - ratio=(3.0 / 4.0, 4.0 / 3.0), -): - """ - Crop the given images to random size and aspect ratio. A crop of random - size (default: of 0.08 to 1.0) of the original size and a random aspect - ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This - crop is finally resized to given size. This is popularly used to train the - Inception networks. - - Args: - images: Images to perform resizing and cropping. - target_height: Desired height after cropping. - target_width: Desired width after cropping. - scale: Scale range of Inception-style area based random resizing. - ratio: Aspect ratio range of Inception-style area based random resizing. - """ - - height = images.shape[2] - width = images.shape[3] - - i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) - cropped = images[:, :, i:i + h, j:j + w] - return torch.nn.functional.interpolate( - cropped, - size=(target_height, target_width), - mode="bilinear", - align_corners=False, - ) - - -def random_resized_crop_with_shift( - images, - target_height, - target_width, - scale=(0.8, 1.0), - ratio=(3.0 / 4.0, 4.0 / 3.0), -): - """ - This is similar to random_resized_crop. However, it samples two different - boxes (for cropping) for the first and last frame. It then linearly - interpolates the two boxes for other frames. - - Args: - images: Images to perform resizing and cropping. - target_height: Desired height after cropping. - target_width: Desired width after cropping. - scale: Scale range of Inception-style area based random resizing. - ratio: Aspect ratio range of Inception-style area based random resizing. 
- """ - t = images.shape[1] - height = images.shape[2] - width = images.shape[3] - - i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) - i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width) - i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()] - j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()] - h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()] - w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()] - out = torch.zeros((3, t, target_height, target_width)) - for ind in range(t): - out[:, ind:ind + 1, :, :] = torch.nn.functional.interpolate( - images[:, ind:ind + 1, i_s[ind]:i_s[ind] + h_s[ind], - j_s[ind]:j_s[ind] + w_s[ind], ], - size=(target_height, target_width), - mode="bilinear", - align_corners=False, - ) - return out - - -def create_random_augment( - input_size, - auto_augment=None, - interpolation="bilinear", -): - """ - Get video randaug transform. - - Args: - input_size: The size of the input video in tuple. - auto_augment: Parameters for randaug. An example: - "rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number - of operations to apply). - interpolation: Interpolation method. - """ - if isinstance(input_size, tuple): - img_size = input_size[-2:] - else: - img_size = input_size - - if auto_augment: - assert isinstance(auto_augment, str) - if isinstance(img_size, tuple): - img_size_min = min(img_size) - else: - img_size_min = img_size - aa_params = {"translate_const": int(img_size_min * 0.45)} - if interpolation and interpolation != "random": - aa_params["interpolation"] = _pil_interp(interpolation) - if auto_augment.startswith("rand"): - return transforms.Compose( - [rand_augment_transform(auto_augment, aa_params)]) - raise NotImplementedError - - -def random_sized_crop_img( - im, - size, - jitter_scale=(0.08, 1.0), - jitter_aspect=(3.0 / 4.0, 4.0 / 3.0), - max_iter=10, -): - """ - Performs Inception-style cropping (used for training). - """ - assert (len( - im.shape) == 3), "Currently only support image for random_sized_crop" - h, w = im.shape[1:3] - i, j, h, w = _get_param_spatial_crop( - scale=jitter_scale, - ratio=jitter_aspect, - height=h, - width=w, - num_repeat=max_iter, - log_scale=False, - switch_hw=True, - ) - cropped = im[:, i:i + h, j:j + w] - return torch.nn.functional.interpolate( - cropped.unsqueeze(0), - size=(size, size), - mode="bilinear", - align_corners=False, - ).squeeze(0) - - -# The following code are modified based on timm lib, we will replace the following -# contents with dependency from PyTorchVideo. -# https://github.com/facebookresearch/pytorchvideo -class RandomResizedCropAndInterpolation: - """Crop the given PIL Image to random size and aspect ratio with random interpolation. - A crop of random size (default: of 0.08 to 1.0) of the original size and a random - aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop - is finally resized to given size. - This is popularly used to train the Inception networks. 
- Args: - size: expected output size of each edge - scale: range of size of the origin size cropped - ratio: range of aspect ratio of the origin aspect ratio cropped - interpolation: Default: PIL.Image.BILINEAR - """ - - def __init__( - self, - size, - scale=(0.08, 1.0), - ratio=(3.0 / 4.0, 4.0 / 3.0), - interpolation="bilinear", - ): - if isinstance(size, tuple): - self.size = size - else: - self.size = (size, size) - if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): - print("range should be of kind (min, max)") - - if interpolation == "random": - self.interpolation = _RANDOM_INTERPOLATION - else: - self.interpolation = _pil_interp(interpolation) - self.scale = scale - self.ratio = ratio - - @staticmethod - def get_params(img, scale, ratio): - """Get parameters for ``crop`` for a random sized crop. - Args: - img (PIL Image): Image to be cropped. - scale (tuple): range of size of the origin size cropped - ratio (tuple): range of aspect ratio of the origin aspect ratio cropped - Returns: - tuple: params (i, j, h, w) to be passed to ``crop`` for a random - sized crop. - """ - area = img.size[0] * img.size[1] - - for _ in range(10): - target_area = random.uniform(*scale) * area - log_ratio = (math.log(ratio[0]), math.log(ratio[1])) - aspect_ratio = math.exp(random.uniform(*log_ratio)) - - w = int(round(math.sqrt(target_area * aspect_ratio))) - h = int(round(math.sqrt(target_area / aspect_ratio))) - - if w <= img.size[0] and h <= img.size[1]: - i = random.randint(0, img.size[1] - h) - j = random.randint(0, img.size[0] - w) - return i, j, h, w - - # Fallback to central crop - in_ratio = img.size[0] / img.size[1] - if in_ratio < min(ratio): - w = img.size[0] - h = int(round(w / min(ratio))) - elif in_ratio > max(ratio): - h = img.size[1] - w = int(round(h * max(ratio))) - else: # whole image - w = img.size[0] - h = img.size[1] - i = (img.size[1] - h) // 2 - j = (img.size[0] - w) // 2 - return i, j, h, w - - def __call__(self, img): - """ - Args: - img (PIL Image): Image to be cropped and resized. - Returns: - PIL Image: Randomly cropped and resized image. 
- """ - i, j, h, w = self.get_params(img, self.scale, self.ratio) - if isinstance(self.interpolation, (tuple, list)): - interpolation = random.choice(self.interpolation) - else: - interpolation = self.interpolation - return F.resized_crop(img, i, j, h, w, self.size, interpolation) - - def __repr__(self): - if isinstance(self.interpolation, (tuple, list)): - interpolate_str = " ".join( - [_pil_interpolation_to_str[x] for x in self.interpolation]) - else: - interpolate_str = _pil_interpolation_to_str[self.interpolation] - format_string = self.__class__.__name__ + "(size={0}".format(self.size) - format_string += ", scale={0}".format( - tuple(round(s, 4) for s in self.scale)) - format_string += ", ratio={0}".format( - tuple(round(r, 4) for r in self.ratio)) - format_string += ", interpolation={0})".format(interpolate_str) - return format_string - - -def transforms_imagenet_train( - img_size=224, - scale=None, - ratio=None, - hflip=0.5, - vflip=0.0, - color_jitter=0.4, - auto_augment=None, - interpolation="random", - use_prefetcher=False, - mean=(0.485, 0.456, 0.406), - std=(0.229, 0.224, 0.225), - re_prob=0.0, - re_mode="const", - re_count=1, - re_num_splits=0, - separate=False, -): - """ - If separate==True, the transforms are returned as a tuple of 3 separate transforms - for use in a mixing dataset that passes - * all data through the first (primary) transform, called the 'clean' data - * a portion of the data through the secondary transform - * normalizes and converts the branches above with the third, final transform - """ - if isinstance(img_size, tuple): - img_size = img_size[-2:] - else: - img_size = img_size - - scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range - ratio = tuple(ratio - or (3.0 / 4.0, 4.0 / 3.0)) # default imagenet ratio range - primary_tfl = [ - RandomResizedCropAndInterpolation( - img_size, scale=scale, ratio=ratio, interpolation=interpolation) - ] - if hflip > 0.0: - primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] - if vflip > 0.0: - primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] - - secondary_tfl = [] - if auto_augment: - assert isinstance(auto_augment, str) - if isinstance(img_size, tuple): - img_size_min = min(img_size) - else: - img_size_min = img_size - aa_params = dict( - translate_const=int(img_size_min * 0.45), - img_mean=tuple([min(255, round(255 * x)) for x in mean]), - ) - if interpolation and interpolation != "random": - aa_params["interpolation"] = _pil_interp(interpolation) - if auto_augment.startswith("rand"): - secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] - elif auto_augment.startswith("augmix"): - raise NotImplementedError("Augmix not implemented") - else: - raise NotImplementedError("Auto aug not implemented") - elif color_jitter is not None: - # color jitter is enabled when not using AA - if isinstance(color_jitter, (list, tuple)): - # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation - # or 4 if also augmenting hue - assert len(color_jitter) in (3, 4) - else: - # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue - color_jitter = (float(color_jitter), ) * 3 - secondary_tfl += [transforms.ColorJitter(*color_jitter)] - - final_tfl = [] - final_tfl += [ - transforms.ToTensor(), - transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)), - ] - if re_prob > 0.0: - final_tfl.append( - RandomErasing( - re_prob, - mode=re_mode, - max_count=re_count, - num_splits=re_num_splits, - device="cpu", - cube=False, - )) - - if separate: - 
return ( - transforms.Compose(primary_tfl), - transforms.Compose(secondary_tfl), - transforms.Compose(final_tfl), - ) - else: - return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) - - -############################################################################################################ -############################################################################################################ - - -class Compose(object): - """Composes several transforms - Args: - transforms (list of ``Transform`` objects): list of transforms - to compose - """ - - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, clip): - for t in self.transforms: - clip = t(clip) - return clip - - -class RandomHorizontalFlip(object): - """Horizontally flip the list of given images randomly - with a probability 0.5 - """ - - def __call__(self, clip): - """ - Args: - img (PIL.Image or numpy.ndarray): List of images to be cropped - in format (h, w, c) in numpy.ndarray - Returns: - PIL.Image or numpy.ndarray: Randomly flipped clip - """ - if random.random() < 0.5: - if isinstance(clip[0], np.ndarray): - return [np.fliplr(img) for img in clip] - elif isinstance(clip[0], PIL.Image.Image): - return [ - img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip - ] - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - ' but got list of {0}'.format(type(clip[0]))) - return clip - - -class RandomResize(object): - """Resizes a list of (H x W x C) numpy.ndarray to the final size - The larger the original image is, the more times it takes to - interpolate - Args: - interpolation (str): Can be one of 'nearest', 'bilinear' - defaults to nearest - size (tuple): (widht, height) - """ - - def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'): - self.ratio = ratio - self.interpolation = interpolation - - def __call__(self, clip): - scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) - - if isinstance(clip[0], np.ndarray): - im_h, im_w, im_c = clip[0].shape - elif isinstance(clip[0], PIL.Image.Image): - im_w, im_h = clip[0].size - - new_w = int(im_w * scaling_factor) - new_h = int(im_h * scaling_factor) - new_size = (new_w, new_h) - resized = FF.resize_clip( - clip, new_size, interpolation=self.interpolation) - return resized - - -class Resize(object): - """Resizes a list of (H x W x C) numpy.ndarray to the final size - The larger the original image is, the more times it takes to - interpolate - Args: - interpolation (str): Can be one of 'nearest', 'bilinear' - defaults to nearest - size (tuple): (widht, height) - """ - - def __init__(self, size, interpolation='nearest'): - self.size = size - self.interpolation = interpolation - - def __call__(self, clip): - resized = FF.resize_clip( - clip, self.size, interpolation=self.interpolation) - return resized - - -class RandomCrop(object): - """Extract random crop at the same location for a list of images - Args: - size (sequence or int): Desired output size for the - crop in format (h, w) - """ - - def __init__(self, size): - if isinstance(size, numbers.Number): - size = (size, size) - - self.size = size - - def __call__(self, clip): - """ - Args: - img (PIL.Image or numpy.ndarray): List of images to be cropped - in format (h, w, c) in numpy.ndarray - Returns: - PIL.Image or numpy.ndarray: Cropped list of images - """ - h, w = self.size - if isinstance(clip[0], np.ndarray): - im_h, im_w, im_c = clip[0].shape - elif isinstance(clip[0], PIL.Image.Image): - im_w, im_h = clip[0].size - else: - raise 
TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - if w > im_w or h > im_h: - error_msg = ( - 'Initial image size should be larger then ' - 'cropped size but got cropped sizes : ({w}, {h}) while ' - 'initial image is ({im_w}, {im_h})'.format( - im_w=im_w, im_h=im_h, w=w, h=h)) - raise ValueError(error_msg) - - x1 = random.randint(0, im_w - w) - y1 = random.randint(0, im_h - h) - cropped = FF.crop_clip(clip, y1, x1, h, w) - - return cropped - - -class ThreeCrop(object): - """Extract random crop at the same location for a list of images - Args: - size (sequence or int): Desired output size for the - crop in format (h, w) - """ - - def __init__(self, size): - if isinstance(size, numbers.Number): - size = (size, size) - - self.size = size - - def __call__(self, clip): - """ - Args: - img (PIL.Image or numpy.ndarray): List of images to be cropped - in format (h, w, c) in numpy.ndarray - Returns: - PIL.Image or numpy.ndarray: Cropped list of images - """ - h, w = self.size - if isinstance(clip[0], np.ndarray): - im_h, im_w, im_c = clip[0].shape - elif isinstance(clip[0], PIL.Image.Image): - im_w, im_h = clip[0].size - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - if w != im_w and h != im_h: - clip = FF.resize_clip(clip, self.size, interpolation="bilinear") - im_h, im_w, im_c = clip[0].shape - - step = np.max((np.max((im_w, im_h)) - self.size[0]) // 2, 0) - cropped = [] - for i in range(3): - if (im_h > self.size[0]): - x1 = 0 - y1 = i * step - cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) - else: - x1 = i * step - y1 = 0 - cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) - return cropped - - -class RandomRotation(object): - """Rotate entire clip randomly by a random angle within - given bounds - Args: - degrees (sequence or int): Range of degrees to select from - If degrees is a number instead of sequence like (min, max), - the range of degrees, will be (-degrees, +degrees). 
- """ - - def __init__(self, degrees): - if isinstance(degrees, numbers.Number): - if degrees < 0: - raise ValueError('If degrees is a single number,' - 'must be positive') - degrees = (-degrees, degrees) - else: - if len(degrees) != 2: - raise ValueError('If degrees is a sequence,' - 'it must be of len 2.') - - self.degrees = degrees - - def __call__(self, clip): - """ - Args: - img (PIL.Image or numpy.ndarray): List of images to be cropped - in format (h, w, c) in numpy.ndarray - Returns: - PIL.Image or numpy.ndarray: Cropped list of images - """ - import skimage - angle = random.uniform(self.degrees[0], self.degrees[1]) - if isinstance(clip[0], np.ndarray): - rotated = [skimage.transform.rotate(img, angle) for img in clip] - elif isinstance(clip[0], PIL.Image.Image): - rotated = [img.rotate(angle) for img in clip] - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - - return rotated - - -class CenterCrop(object): - """Extract center crop at the same location for a list of images - Args: - size (sequence or int): Desired output size for the - crop in format (h, w) - """ - - def __init__(self, size): - if isinstance(size, numbers.Number): - size = (size, size) - - self.size = size - - def __call__(self, clip): - """ - Args: - img (PIL.Image or numpy.ndarray): List of images to be cropped - in format (h, w, c) in numpy.ndarray - Returns: - PIL.Image or numpy.ndarray: Cropped list of images - """ - h, w = self.size - if isinstance(clip[0], np.ndarray): - im_h, im_w, im_c = clip[0].shape - elif isinstance(clip[0], PIL.Image.Image): - im_w, im_h = clip[0].size - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - if w > im_w or h > im_h: - error_msg = ( - 'Initial image size should be larger then ' - 'cropped size but got cropped sizes : ({w}, {h}) while ' - 'initial image is ({im_w}, {im_h})'.format( - im_w=im_w, im_h=im_h, w=w, h=h)) - raise ValueError(error_msg) - - x1 = int(round((im_w - w) / 2.)) - y1 = int(round((im_h - h) / 2.)) - cropped = FF.crop_clip(clip, y1, x1, h, w) - - return cropped - - -class ColorJitter(object): - """Randomly change the brightness, contrast and saturation and hue of the clip - Args: - brightness (float): How much to jitter brightness. brightness_factor - is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. - contrast (float): How much to jitter contrast. contrast_factor - is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. - saturation (float): How much to jitter saturation. saturation_factor - is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. - hue(float): How much to jitter hue. hue_factor is chosen uniformly from - [-hue, hue]. Should be >=0 and <= 0.5. 
- """ - - def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): - self.brightness = brightness - self.contrast = contrast - self.saturation = saturation - self.hue = hue - - def get_params(self, brightness, contrast, saturation, hue): - if brightness > 0: - brightness_factor = random.uniform( - max(0, 1 - brightness), 1 + brightness) - else: - brightness_factor = None - - if contrast > 0: - contrast_factor = random.uniform( - max(0, 1 - contrast), 1 + contrast) - else: - contrast_factor = None - - if saturation > 0: - saturation_factor = random.uniform( - max(0, 1 - saturation), 1 + saturation) - else: - saturation_factor = None - - if hue > 0: - hue_factor = random.uniform(-hue, hue) - else: - hue_factor = None - return brightness_factor, contrast_factor, saturation_factor, hue_factor - - def __call__(self, clip): - """ - Args: - clip (list): list of PIL.Image - Returns: - list PIL.Image : list of transformed PIL.Image - """ - if isinstance(clip[0], np.ndarray): - raise TypeError( - 'Color jitter not yet implemented for numpy arrays') - elif isinstance(clip[0], PIL.Image.Image): - brightness, contrast, saturation, hue = self.get_params( - self.brightness, self.contrast, self.saturation, self.hue) - - # Create img transform function sequence - img_transforms = [] - if brightness is not None: - img_transforms.append( - lambda img: torchvision.transforms.functional. - adjust_brightness(img, brightness)) - if saturation is not None: - img_transforms.append( - lambda img: torchvision.transforms.functional. - adjust_saturation(img, saturation)) - if hue is not None: - img_transforms.append(lambda img: torchvision.transforms. - functional.adjust_hue(img, hue)) - if contrast is not None: - img_transforms.append( - lambda img: torchvision.transforms.functional. - adjust_contrast(img, contrast)) - random.shuffle(img_transforms) - - # Apply to all images - jittered_clip = [] - for img in clip: - for func in img_transforms: - jittered_img = func(img) - jittered_clip.append(jittered_img) - - else: - raise TypeError('Expected numpy.ndarray or PIL.Image' + - 'but got list of {0}'.format(type(clip[0]))) - return jittered_clip - - -class Normalize(object): - """Normalize a clip with mean and standard deviation. - Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform - will normalize each channel of the input ``torch.*Tensor`` i.e. - ``input[channel] = (input[channel] - mean[channel]) / std[channel]`` - .. note:: - This transform acts out of place, i.e., it does not mutates the input tensor. - Args: - mean (sequence): Sequence of means for each channel. - std (sequence): Sequence of standard deviations for each channel. - """ - - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, clip): - """ - Args: - clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized. - Returns: - Tensor: Normalized Tensor clip. 
- """ - return FF.normalize(clip, self.mean, self.std) - - def __repr__(self): - return self.__class__.__name__ + '(mean={0}, std={1})'.format( - self.mean, self.std) diff --git a/spaces/Lianjd/stock_dashboard/backtrader/indicators/dv2.py b/spaces/Lianjd/stock_dashboard/backtrader/indicators/dv2.py deleted file mode 100644 index abec317fdac1c81b7da2c6cba22509e15a9010d5..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/indicators/dv2.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - - -from . import Indicator, SMA, PercentRank - - -__all__ = ['DV2'] - - -class DV2(Indicator): - ''' - RSI(2) alternative - Developed by David Varadi of http://cssanalytics.wordpress.com/ - - This seems to be the *Bounded* version. - - See also: - - - http://web.archive.org/web/20131216100741/http://quantingdutchman.wordpress.com/2010/08/06/dv2-indicator-for-amibroker/ - - ''' - params = ( - ('period', 252), - ('maperiod', 2), - ('_movav', SMA), - ) - lines = ('dv2',) - - def __init__(self): - chl = self.data.close / ((self.data.high + self.data.low) / 2.0) - dvu = self.p._movav(chl, period=self.p.maperiod) - self.lines.dv2 = PercentRank(dvu, period=self.p.period) * 100 - super(DV2, self).__init__() diff --git a/spaces/LightChen2333/OpenSLU/common/loader.py b/spaces/LightChen2333/OpenSLU/common/loader.py deleted file mode 100644 index 5b97680ae3ef9b5523cce591e30c520059ff36a8..0000000000000000000000000000000000000000 --- a/spaces/LightChen2333/OpenSLU/common/loader.py +++ /dev/null @@ -1,332 +0,0 @@ -''' -Author: Qiguang Chen -Date: 2023-01-11 10:39:26 -LastEditors: Qiguang Chen -LastEditTime: 2023-02-19 15:39:48 -Description: all class for load data. - -''' -import os -import torch -import json -from datasets import load_dataset, Dataset -from torch.utils.data import DataLoader - -from common.utils import InputData - -ABS_PATH=os.path.join(os.path.abspath(os.path.dirname(__file__)), "../") - -class DataFactory(object): - def __init__(self, tokenizer,use_multi_intent=False, to_lower_case=True): - """_summary_ - - Args: - tokenizer (Tokenizer): _description_ - use_multi_intent (bool, optional): _description_. Defaults to False. 
- """ - self.tokenizer = tokenizer - self.slot_label_list = [] - self.intent_label_list = [] - self.use_multi = use_multi_intent - self.to_lower_case = to_lower_case - self.slot_label_dict = None - self.intent_label_dict = None - - def __is_supported_datasets(self, dataset_name:str)->bool: - return dataset_name.lower() in ["atis", "snips", "mix-atis", "mix-atis"] - - def load_dataset(self, dataset_config, split="train"): - dataset_name = None - if split not in dataset_config: - dataset_name = dataset_config.get("dataset_name") - elif self.__is_supported_datasets(dataset_config[split]): - dataset_name = dataset_config[split].lower() - if dataset_name is not None: - return load_dataset("LightChen2333/OpenSLU", dataset_name, split=split) - else: - data_file = dataset_config[split] - data_dict = {"text": [], "slot": [], "intent":[]} - with open(data_file, encoding="utf-8") as f: - for line in f: - row = json.loads(line) - data_dict["text"].append(row["text"]) - data_dict["slot"].append(row["slot"]) - data_dict["intent"].append(row["intent"]) - return Dataset.from_dict(data_dict) - - def update_label_names(self, dataset): - for intent_labels in dataset["intent"]: - if self.use_multi: - intent_label = intent_labels.split("#") - else: - intent_label = [intent_labels] - for x in intent_label: - if x not in self.intent_label_list: - self.intent_label_list.append(x) - for slot_label in dataset["slot"]: - for x in slot_label: - if x not in self.slot_label_list: - self.slot_label_list.append(x) - self.intent_label_dict = {key: index for index, - key in enumerate(self.intent_label_list)} - self.slot_label_dict = {key: index for index, - key in enumerate(self.slot_label_list)} - - def update_vocabulary(self, dataset): - if self.tokenizer.name_or_path in ["word_tokenizer"]: - for data in dataset: - self.tokenizer.add_instance(data["text"]) - - @staticmethod - def fast_align_data(text, padding_side="right"): - for i in range(len(text.input_ids)): - desired_output = [] - for word_id in text.word_ids(i): - if word_id is not None: - start, end = text.word_to_tokens( - i, word_id, sequence_index=0 if padding_side == "right" else 1) - if start == end - 1: - tokens = [start] - else: - tokens = [start, end - 1] - if len(desired_output) == 0 or desired_output[-1] != tokens: - desired_output.append(tokens) - yield desired_output - - def fast_align(self, - batch, - ignore_index=-100, - device="cuda", - config=None, - enable_label=True, - label2tensor=True): - if self.to_lower_case: - input_list = [[t.lower() for t in x["text"]] for x in batch] - else: - input_list = [x["text"] for x in batch] - text = self.tokenizer(input_list, - return_tensors="pt", - padding=True, - is_split_into_words=True, - truncation=True, - **config).to(device) - if enable_label: - if label2tensor: - - slot_mask = torch.ones_like(text.input_ids) * ignore_index - for i, offsets in enumerate( - DataFactory.fast_align_data(text, padding_side=self.tokenizer.padding_side)): - num = 0 - assert len(offsets) == len(batch[i]["text"]) - assert len(offsets) == len(batch[i]["slot"]) - for off in offsets: - slot_mask[i][off[0] - ] = self.slot_label_dict[batch[i]["slot"][num]] - num += 1 - slot = slot_mask.clone() - attentin_id = 0 if self.tokenizer.padding_side == "right" else 1 - for i, slot_batch in enumerate(slot): - for j, x in enumerate(slot_batch): - if x == ignore_index and text.attention_mask[i][j] == attentin_id and (text.input_ids[i][ - j] not in self.tokenizer.all_special_ids or text.input_ids[i][j] == self.tokenizer.unk_token_id): - 
slot[i][j] = slot[i][j - 1] - slot = slot.to(device) - if not self.use_multi: - intent = torch.tensor( - [self.intent_label_dict[x["intent"]] for x in batch]).to(device) - else: - one_hot = torch.zeros( - (len(batch), len(self.intent_label_list)), dtype=torch.float) - for index, b in enumerate(batch): - for x in b["intent"].split("#"): - one_hot[index][self.intent_label_dict[x]] = 1. - intent = one_hot.to(device) - else: - slot_mask = None - slot = [['#' for _ in range(text.input_ids.shape[1])] - for _ in range(text.input_ids.shape[0])] - for i, offsets in enumerate(DataFactory.fast_align_data(text)): - num = 0 - for off in offsets: - slot[i][off[0]] = batch[i]["slot"][num] - num += 1 - if not self.use_multi: - intent = [x["intent"] for x in batch] - else: - intent = [ - [x for x in b["intent"].split("#")] for b in batch] - return InputData((text, slot, intent)) - else: - return InputData((text, None, None)) - - def general_align_data(self, split_text_list, raw_text_list, encoded_text): - for i in range(len(split_text_list)): - desired_output = [] - jdx = 0 - offset = encoded_text.offset_mapping[i].tolist() - split_texts = split_text_list[i] - raw_text = raw_text_list[i] - last = 0 - temp_offset = [] - for off in offset: - s, e = off - if len(temp_offset) > 0 and (e != 0 and last == s): - len_1 = off[1] - off[0] - len_2 = temp_offset[-1][1] - temp_offset[-1][0] - if len_1 > len_2: - temp_offset.pop(-1) - temp_offset.append([0, 0]) - temp_offset.append(off) - continue - temp_offset.append(off) - last = s - offset = temp_offset - for split_text in split_texts: - while jdx < len(offset) and offset[jdx][0] == 0 and offset[jdx][1] == 0: - jdx += 1 - if jdx == len(offset): - continue - start_, end_ = offset[jdx] - tokens = None - if split_text == raw_text[start_:end_].strip(): - tokens = [jdx] - else: - # Compute "xxx" -> "xx" "#x" - temp_jdx = jdx - last_str = raw_text[start_:end_].strip() - while last_str != split_text and temp_jdx < len(offset) - 1: - temp_jdx += 1 - last_str += raw_text[offset[temp_jdx] - [0]:offset[temp_jdx][1]].strip() - - if temp_jdx == jdx: - raise ValueError("Illegal Input data") - elif last_str == split_text: - tokens = [jdx, temp_jdx] - jdx = temp_jdx - else: - jdx -= 1 - jdx += 1 - if tokens is not None: - desired_output.append(tokens) - yield desired_output - - def general_align(self, - batch, - ignore_index=-100, - device="cuda", - config=None, - enable_label=True, - label2tensor=True, - locale="en-US"): - if self.to_lower_case: - raw_data = [" ".join(x["text"]).lower() if locale not in ['ja-JP', 'zh-CN', 'zh-TW'] else "".join(x["text"]) for x in - batch] - input_list = [[t.lower() for t in x["text"]] for x in batch] - else: - input_list = [x["text"] for x in batch] - raw_data = [" ".join(x["text"]) if locale not in ['ja-JP', 'zh-CN', 'zh-TW'] else "".join(x["text"]) for x in - batch] - text = self.tokenizer(raw_data, - return_tensors="pt", - padding=True, - truncation=True, - return_offsets_mapping=True, - **config).to(device) - if enable_label: - if label2tensor: - slot_mask = torch.ones_like(text.input_ids) * ignore_index - for i, offsets in enumerate( - self.general_align_data(input_list, raw_data, encoded_text=text)): - num = 0 - # if len(offsets) != len(batch[i]["text"]) or len(offsets) != len(batch[i]["slot"]): - # if - for off in offsets: - slot_mask[i][off[0] - ] = self.slot_label_dict[batch[i]["slot"][num]] - num += 1 - # slot = slot_mask.clone() - # attentin_id = 0 if self.tokenizer.padding_side == "right" else 1 - # for i, slot_batch in enumerate(slot): 
- # for j, x in enumerate(slot_batch): - # if x == ignore_index and text.attention_mask[i][j] == attentin_id and text.input_ids[i][ - # j] not in self.tokenizer.all_special_ids: - # slot[i][j] = slot[i][j - 1] - slot = slot_mask.to(device) - if not self.use_multi: - intent = torch.tensor( - [self.intent_label_dict[x["intent"]] for x in batch]).to(device) - else: - one_hot = torch.zeros( - (len(batch), len(self.intent_label_list)), dtype=torch.float) - for index, b in enumerate(batch): - for x in b["intent"].split("#"): - one_hot[index][self.intent_label_dict[x]] = 1. - intent = one_hot.to(device) - else: - slot_mask = None - slot = [['#' for _ in range(text.input_ids.shape[1])] - for _ in range(text.input_ids.shape[0])] - for i, offsets in enumerate(self.general_align_data(input_list, raw_data, encoded_text=text)): - num = 0 - for off in offsets: - slot[i][off[0]] = batch[i]["slot"][num] - num += 1 - if not self.use_multi: - intent = [x["intent"] for x in batch] - else: - intent = [ - [x for x in b["intent"].split("#")] for b in batch] - return InputData((text, slot, intent)) - else: - return InputData((text, None, None)) - - def batch_fn(self, - batch, - ignore_index=-100, - device="cuda", - config=None, - align_mode="fast", - enable_label=True, - label2tensor=True): - if align_mode == "fast": - # try: - return self.fast_align(batch, - ignore_index=ignore_index, - device=device, - config=config, - enable_label=enable_label, - label2tensor=label2tensor) - # except: - # return self.general_align(batch, - # ignore_index=ignore_index, - # device=device, - # config=config, - # enable_label=enable_label, - # label2tensor=label2tensor) - else: - return self.general_align(batch, - ignore_index=ignore_index, - device=device, - config=config, - enable_label=enable_label, - label2tensor=label2tensor) - - def get_data_loader(self, - dataset, - batch_size, - shuffle=False, - device="cuda", - enable_label=True, - align_mode="fast", - label2tensor=True, **config): - data_loader = DataLoader(dataset, - shuffle=shuffle, - batch_size=batch_size, - collate_fn=lambda x: self.batch_fn(x, - device=device, - config=config, - enable_label=enable_label, - align_mode=align_mode, - label2tensor=label2tensor)) - return data_loader diff --git a/spaces/Liu-LAB/GPT-academic/request_llm/bridge_jittorllms_llama.py b/spaces/Liu-LAB/GPT-academic/request_llm/bridge_jittorllms_llama.py deleted file mode 100644 index d4853578fadff9f572c86f7f0fe79f9c8c8c1474..0000000000000000000000000000000000000000 --- a/spaces/Liu-LAB/GPT-academic/request_llm/bridge_jittorllms_llama.py +++ /dev/null @@ -1,175 +0,0 @@ - -from transformers import AutoModel, AutoTokenizer -import time -import threading -import importlib -from toolbox import update_ui, get_conf -from multiprocessing import Process, Pipe - -load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" - -################################################################################# -class GetGLMHandle(Process): - def __init__(self): - super().__init__(daemon=True) - self.parent, self.child = Pipe() - self.jittorllms_model = None - self.info = "" - self.local_history = [] - self.success = True - self.check_dependency() - self.start() - self.threadLock = threading.Lock() - - def check_dependency(self): - try: - import pandas - self.info = "依赖检测通过" - self.success = True - except: - from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r 
request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ - r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" + trimmed_format_exc() - self.success = False - - def ready(self): - return self.jittorllms_model is not None - - def run(self): - # 子进程执行 - # 第一次运行,加载参数 - def validate_path(): - import os, sys - dir_name = os.path.dirname(__file__) - env = os.environ.get("PATH", "") - os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') - root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') - validate_path() # validate path so you can run from base directory - - def load_model(): - import types - try: - if self.jittorllms_model is None: - device, = get_conf('LOCAL_MODEL_DEVICE') - from .jittorllms.models import get_model - # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"] - args_dict = {'model': 'llama'} - print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))') - self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict)) - print('done get model') - except: - self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。') - raise RuntimeError("不能正常加载jittorllms的参数!") - print('load_model') - load_model() - - # 进入任务等待状态 - print('进入任务等待状态') - while True: - # 进入任务等待状态 - kwargs = self.child.recv() - query = kwargs['query'] - history = kwargs['history'] - # 是否重置 - if len(self.local_history) > 0 and len(history)==0: - print('触发重置') - self.jittorllms_model.reset() - self.local_history.append(query) - - print('收到消息,开始请求') - try: - for response in self.jittorllms_model.stream_chat(query, history): - print(response) - self.child.send(response) - except: - from toolbox import trimmed_format_exc - print(trimmed_format_exc()) - self.child.send('[Local Message] Call jittorllms fail.') - # 请求处理结束,开始下一个循环 - self.child.send('[Finish]') - - def stream_chat(self, **kwargs): - # 主进程执行 - self.threadLock.acquire() - self.parent.send(kwargs) - while True: - res = self.parent.recv() - if res != '[Finish]': - yield res - else: - break - self.threadLock.release() - -global llama_glm_handle -llama_glm_handle = None -################################################################################# -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): - """ - 多线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - global llama_glm_handle - if llama_glm_handle is None: - llama_glm_handle = GetGLMHandle() - if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + llama_glm_handle.info - if not llama_glm_handle.success: - error = llama_glm_handle.info - llama_glm_handle = None - raise RuntimeError(error) - - # jittorllms 没有 sys_prompt 接口,因此把prompt加入 history - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 - response = "" - for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - print(response) - if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > 
watch_dog_patience: - raise RuntimeError("程序终止。") - return response - - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 单线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - chatbot.append((inputs, "")) - - global llama_glm_handle - if llama_glm_handle is None: - llama_glm_handle = GetGLMHandle() - chatbot[-1] = (inputs, load_message + "\n\n" + llama_glm_handle.info) - yield from update_ui(chatbot=chatbot, history=[]) - if not llama_glm_handle.success: - llama_glm_handle = None - return - - if additional_fn is not None: - from core_functional import handle_core_functionality - inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) - - # 处理历史信息 - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - # 开始接收jittorllms的回复 - response = "[Local Message]: 等待jittorllms响应中 ..." - for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - chatbot[-1] = (inputs, response) - yield from update_ui(chatbot=chatbot, history=history) - - # 总结输出 - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." - history.extend([inputs, response]) - yield from update_ui(chatbot=chatbot, history=history) diff --git a/spaces/LucasCodeBreak/MusicGen/audiocraft/data/audio.py b/spaces/LucasCodeBreak/MusicGen/audiocraft/data/audio.py deleted file mode 100644 index 2048df6f175d7303bcf5c7b931922fd297908ead..0000000000000000000000000000000000000000 --- a/spaces/LucasCodeBreak/MusicGen/audiocraft/data/audio.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Audio IO methods are defined in this module (info, read, write), -We rely on av library for faster read when possible, otherwise on torchaudio. -""" - -from dataclasses import dataclass -from pathlib import Path -import logging -import typing as tp - -import numpy as np -import soundfile -import torch -from torch.nn import functional as F -import torchaudio as ta - -import av - -from .audio_utils import f32_pcm, i16_pcm, normalize_audio - - -_av_initialized = False - - -def _init_av(): - global _av_initialized - if _av_initialized: - return - logger = logging.getLogger('libav.mp3') - logger.setLevel(logging.ERROR) - _av_initialized = True - - -@dataclass(frozen=True) -class AudioFileInfo: - sample_rate: int - duration: float - channels: int - - -def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - _init_av() - with av.open(str(filepath)) as af: - stream = af.streams.audio[0] - sample_rate = stream.codec_context.sample_rate - duration = float(stream.duration * stream.time_base) - channels = stream.channels - return AudioFileInfo(sample_rate, duration, channels) - - -def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - info = soundfile.info(filepath) - return AudioFileInfo(info.samplerate, info.duration, info.channels) - - -def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: - # torchaudio no longer returns useful duration informations for some formats like mp3s. 
- filepath = Path(filepath) - if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info - # ffmpeg has some weird issue with flac. - return _soundfile_info(filepath) - else: - return _av_info(filepath) - - -def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]: - """FFMPEG-based audio file reading using PyAV bindings. - Soundfile cannot read mp3 and av_read is more efficient than torchaudio. - - Args: - filepath (str or Path): Path to audio file to read. - seek_time (float): Time at which to start reading in the file. - duration (float): Duration to read from the file. If set to -1, the whole file is read. - Returns: - Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate - """ - _init_av() - with av.open(str(filepath)) as af: - stream = af.streams.audio[0] - sr = stream.codec_context.sample_rate - num_frames = int(sr * duration) if duration >= 0 else -1 - frame_offset = int(sr * seek_time) - # we need a small negative offset otherwise we get some edge artifact - # from the mp3 decoder. - af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream) - frames = [] - length = 0 - for frame in af.decode(streams=stream.index): - current_offset = int(frame.rate * frame.pts * frame.time_base) - strip = max(0, frame_offset - current_offset) - buf = torch.from_numpy(frame.to_ndarray()) - if buf.shape[0] != stream.channels: - buf = buf.view(-1, stream.channels).t() - buf = buf[:, strip:] - frames.append(buf) - length += buf.shape[1] - if num_frames > 0 and length >= num_frames: - break - assert frames - # If the above assert fails, it is likely because we seeked past the end of file point, - # in which case ffmpeg returns a single frame with only zeros, and a weird timestamp. - # This will need proper debugging, in due time. - wav = torch.cat(frames, dim=1) - assert wav.shape[0] == stream.channels - if num_frames > 0: - wav = wav[:, :num_frames] - return f32_pcm(wav), sr - - -def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0., - duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]: - """Read audio by picking the most appropriate backend tool based on the audio format. - - Args: - filepath (str or Path): Path to audio file to read. - seek_time (float): Time at which to start reading in the file. - duration (float): Duration to read from the file. If set to -1, the whole file is read. - pad (bool): Pad output audio if not reaching expected duration. - Returns: - Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate. - """ - fp = Path(filepath) - if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg - # There is some bug with ffmpeg and reading flac - info = _soundfile_info(filepath) - frames = -1 if duration <= 0 else int(duration * info.sample_rate) - frame_offset = int(seek_time * info.sample_rate) - wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32) - assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}" - wav = torch.from_numpy(wav).t().contiguous() - if len(wav.shape) == 1: - wav = torch.unsqueeze(wav, 0) - elif ( - fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats() - and duration <= 0 and seek_time == 0 - ): - # Torchaudio is faster if we load an entire file at once. 
- wav, sr = ta.load(fp) - else: - wav, sr = _av_read(filepath, seek_time, duration) - if pad and duration > 0: - expected_frames = int(duration * sr) - wav = F.pad(wav, (0, expected_frames - wav.shape[-1])) - return wav, sr - - -def audio_write(stem_name: tp.Union[str, Path], - wav: torch.Tensor, sample_rate: int, - format: str = 'wav', mp3_rate: int = 320, normalize: bool = True, - strategy: str = 'peak', peak_clip_headroom_db: float = 1, - rms_headroom_db: float = 18, loudness_headroom_db: float = 14, - loudness_compressor: bool = False, - log_clipping: bool = True, make_parent_dir: bool = True, - add_suffix: bool = True) -> Path: - """Convenience function for saving audio to disk. Returns the filename the audio was written to. - - Args: - stem_name (str or Path): Filename without extension which will be added automatically. - format (str): Either "wav" or "mp3". - mp3_rate (int): kbps when using mp3s. - normalize (bool): if `True` (default), normalizes according to the prescribed - strategy (see after). If `False`, the strategy is only used in case clipping - would happen. - strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak', - i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square - with extra headroom to avoid clipping. 'clip' just clips. - peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy. - rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger - than the `peak_clip` one to avoid further clipping. - loudness_headroom_db (float): Target loudness for loudness normalization. - loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'. - when strategy is 'loudness'log_clipping (bool): If True, basic logging on stderr when clipping still - occurs despite strategy (only for 'rms'). - make_parent_dir (bool): Make parent directory if it doesn't exist. - Returns: - Path: Path of the saved audio. - """ - assert wav.dtype.is_floating_point, "wav is not floating point" - if wav.dim() == 1: - wav = wav[None] - elif wav.dim() > 2: - raise ValueError("Input wav should be at most 2 dimension.") - assert wav.isfinite().all() - wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db, - rms_headroom_db, loudness_headroom_db, log_clipping=log_clipping, - sample_rate=sample_rate, stem_name=str(stem_name)) - kwargs: dict = {} - if format == 'mp3': - suffix = '.mp3' - kwargs.update({"compression": mp3_rate}) - elif format == 'wav': - wav = i16_pcm(wav) - suffix = '.wav' - kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16}) - else: - raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.") - if not add_suffix: - suffix = '' - path = Path(str(stem_name) + suffix) - if make_parent_dir: - path.parent.mkdir(exist_ok=True, parents=True) - try: - ta.save(path, wav, sample_rate, **kwargs) - except Exception: - if path.exists(): - # we do not want to leave half written files around. - path.unlink() - raise - return path diff --git a/spaces/LucasCodeBreak/MusicGen/tests/common_utils/__init__.py b/spaces/LucasCodeBreak/MusicGen/tests/common_utils/__init__.py deleted file mode 100644 index 74ffcfef96fec35c99b2a1a053a61f44f7a8bbe9..0000000000000000000000000000000000000000 --- a/spaces/LucasCodeBreak/MusicGen/tests/common_utils/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
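The `audiocraft/data/audio.py` module deleted above exposes three helpers (`audio_info`, `audio_read`, `audio_write`) whose docstrings describe how a read backend is picked per format and how output normalization works. For reference, a minimal usage sketch of those documented signatures; it assumes an environment where the `audiocraft` package (with its `av`/`soundfile`/`torchaudio` dependencies) is still installed, and `sample.wav` is a placeholder filename:

```python
# Hypothetical usage sketch for the helpers deleted above; assumes audiocraft is
# installed elsewhere and that "sample.wav" is a real local file.
from audiocraft.data.audio import audio_info, audio_read, audio_write

info = audio_info("sample.wav")            # AudioFileInfo(sample_rate, duration, channels)
print(info.sample_rate, info.duration, info.channels)

# Read a 2-second excerpt starting at 0.5 s, padded if the file is shorter.
wav, sr = audio_read("sample.wav", seek_time=0.5, duration=2.0, pad=True)
print(wav.shape, sr)                       # wav is (channels, samples), float32

# Write it back as a peak-normalized 16-bit wav; the suffix is added automatically.
out_path = audio_write("sample_excerpt", wav, sr, format="wav", strategy="peak")
print(out_path)                            # sample_excerpt.wav
```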
-# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from .temp_utils import TempDirMixin -from .wav_utils import get_batch_white_noise, get_white_noise, save_wav diff --git a/spaces/MBZ/LoRA-DreamBooth-Training-UI/constants.py b/spaces/MBZ/LoRA-DreamBooth-Training-UI/constants.py deleted file mode 100644 index baaebbae71058fbb4faed35fd00e7559305dc409..0000000000000000000000000000000000000000 --- a/spaces/MBZ/LoRA-DreamBooth-Training-UI/constants.py +++ /dev/null @@ -1,6 +0,0 @@ -import enum - - -class UploadTarget(enum.Enum): - PERSONAL_PROFILE = 'Personal Profile' - LORA_LIBRARY = 'LoRA Library' diff --git a/spaces/Mahiruoshi/MyGO_VIts-bert/app.py b/spaces/Mahiruoshi/MyGO_VIts-bert/app.py deleted file mode 100644 index 25cb8748be514050b2301d6faef31dc3bc1ac35e..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/MyGO_VIts-bert/app.py +++ /dev/null @@ -1,507 +0,0 @@ -# flake8: noqa: E402 -import logging -logging.getLogger("numba").setLevel(logging.WARNING) -logging.getLogger("markdown_it").setLevel(logging.WARNING) -logging.getLogger("urllib3").setLevel(logging.WARNING) -logging.getLogger("matplotlib").setLevel(logging.WARNING) - -logging.basicConfig( - level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s" -) - -logger = logging.getLogger(__name__) -import datetime -import numpy as np -import torch -from ebooklib import epub -import PyPDF2 -from PyPDF2 import PdfReader -import zipfile -import shutil -import sys, os -import json -from bs4 import BeautifulSoup -import argparse -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -import gradio as gr -import webbrowser -import re -from scipy.io.wavfile import write -from datetime import datetime -net_g = None -BandList = { - - "PoppinParty":["香澄","有咲","たえ","りみ","沙綾"], - "Afterglow":["蘭","モカ","ひまり","巴","つぐみ"], - "HelloHappyWorld":["こころ","美咲","薫","花音","はぐみ"], - "PastelPalettes":["彩","日菜","千聖","イヴ","麻弥"], - "Roselia":["友希那","紗夜","リサ","燐子","あこ"], - "RaiseASuilen":["レイヤ","ロック","ますき","チュチュ","パレオ"], - "Morfonica":["ましろ","瑠唯","つくし","七深","透子"], - "MyGo&AveMujica(Part)":["燈","愛音","そよ","立希","楽奈","祥子","睦","海鈴"], -} - -if sys.platform == "darwin" and torch.backends.mps.is_available(): - device = "mps" - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" -else: - device = "cuda" - -def is_japanese(string): - for ch in string: - if ord(ch) > 0x3040 and ord(ch) < 0x30FF: - return True - return False - -def extrac(text): - text = re.sub("<[^>]*>","",text) - result_list = re.split(r'\n', text) - final_list = [] - for i in result_list: - i = i.replace('\n','').replace(' ','') - #Current length of single sentence: 20 - if len(i)>1: - if len(i) > 20: - try: - cur_list = re.split(r'。|!', i) - for i in cur_list: - if len(i)>1: - final_list.append(i+'。') - except: - pass - else: - final_list.append(i) - ''' - final_list.append(i) - ''' - final_list = [x for x in final_list if x != ''] - return final_list - -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - 
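As an aside before the synthesis pipeline continues: the `extrac` helper defined earlier in this `app.py` pre-chops input text into short sentences (splitting lines longer than roughly 20 characters on `。` and `!`) before feeding them to the model. A standalone sketch of that splitting idea using only the standard library; the `chop` name and the sample string are illustrative, not part of the deleted app:

```python
# Standalone sketch of the sentence-chopping idea used by extrac() above: strip
# markup, split on newlines, then re-split anything longer than ~20 characters
# on CJK sentence punctuation. Purely illustrative.
import re

def chop(text, max_len=20):
    pieces = []
    for line in re.sub(r"<[^>]*>", "", text).split("\n"):
        line = line.replace(" ", "")
        if not line:
            continue
        if len(line) > max_len:
            pieces.extend(p + "。" for p in re.split(r"。|!", line) if p)
        else:
            pieces.append(line)
    return pieces

print(chop("这是一个非常非常非常非常非常长的句子,后面还跟着一句!短句。\n第二行"))
```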
bert = get_bert(norm_text, word2ph, language_str, device) - del word2ph - assert bert.shape[-1] == len(phone), phone - - if language_str == "ZH": - bert = bert - ja_bert = torch.zeros(768, len(phone)) - elif language_str == "JA": - ja_bert = bert - bert = torch.zeros(1024, len(phone)) - else: - bert = torch.zeros(1024, len(phone)) - ja_bert = torch.zeros(768, len(phone)) - - assert bert.shape[-1] == len( - phone - ), f"Bert seq len {bert.shape[-1]} != {len(phone)}" - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - return bert, ja_bert, phone, tone, language - - -def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language): - global net_g - bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps) - with torch.no_grad(): - x_tst = phones.to(device).unsqueeze(0) - tones = tones.to(device).unsqueeze(0) - lang_ids = lang_ids.to(device).unsqueeze(0) - bert = bert.to(device).unsqueeze(0) - ja_bert = ja_bert.to(device).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device) - del phones - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device) - audio = ( - net_g.infer( - x_tst, - x_tst_lengths, - speakers, - tones, - lang_ids, - bert, - ja_bert, - sdp_ratio=sdp_ratio, - noise_scale=noise_scale, - noise_scale_w=noise_scale_w, - length_scale=length_scale, - )[0][0, 0] - .data.cpu() - .float() - .numpy() - ) - current_time = datetime.now() - print(str(current_time)+':'+str(sid)) - del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers - return audio - - -def tts_fn( - text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale,LongSentence -): - if not LongSentence: - with torch.no_grad(): - audio = infer( - text, - sdp_ratio=sdp_ratio, - noise_scale=noise_scale, - noise_scale_w=noise_scale_w, - length_scale=length_scale, - sid=speaker, - language= "JP" if is_japanese(text) else "ZH", - ) - torch.cuda.empty_cache() - return (hps.data.sampling_rate, audio) - else: - audiopath = 'voice.wav' - a = ['【','[','(','('] - b = ['】',']',')',')'] - for i in a: - text = text.replace(i,'<') - for i in b: - text = text.replace(i,'>') - final_list = extrac(text.replace('“','').replace('”','')) - audio_fin = [] - for sentence in final_list: - with torch.no_grad(): - audio = infer( - sentence, - sdp_ratio=sdp_ratio, - noise_scale=noise_scale, - noise_scale_w=noise_scale_w, - length_scale=length_scale, - sid=speaker, - language= "JP" if is_japanese(text) else "ZH", - ) - audio_fin.append(audio) - return (hps.data.sampling_rate, np.concatenate(audio_fin)) - -def split_into_sentences(text): - """将文本分割为句子,基于中文的标点符号""" - sentences = re.split(r'(?<=[。!?…\n])', text) - return [sentence.strip() for sentence in sentences if sentence] - - -def seconds_to_ass_time(seconds): - """将秒数转换为ASS时间格式""" - hours = int(seconds / 3600) - minutes = int((seconds % 3600) / 60) - seconds = int(seconds) % 60 - milliseconds = int((seconds - int(seconds)) * 1000) - return "{:01d}:{:02d}:{:02d}.{:02d}".format(hours, minutes, seconds, int(milliseconds / 10)) - -def generate_audio_and_srt_for_group(group, outputPath, group_index, sampling_rate, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale,spealerList,silenceTime): - audio_fin = [] - ass_entries = [] - start_time = 0 - - ass_header = """[Script Info] -; Script generated by OpenAI Assistant -Title: Audiobook -ScriptType: v4.00+ -WrapStyle: 0 -PlayResX: 640 -PlayResY: 360 -ScaledBorderAndShadow: yes -[V4+ Styles] -Format: Name, Fontname, Fontsize, 
PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding -Style: Default,Arial,20,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100,100,0,0,1,1,1,2,10,10,10,1 -[Events] -Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text -""" - - for sentence in group: - try: - print(sentence) - FakeSpeaker = sentence.split("|")[0] - print(FakeSpeaker) - SpeakersList = re.split('\n', spealerList) - if FakeSpeaker in list(hps.data.spk2id.keys()): - speaker = FakeSpeaker - for i in SpeakersList: - if FakeSpeaker == i.split("|")[1]: - speaker = i.split("|")[0] - speaker_ids = hps.data.spk2id - - _, audio = tts_fn(sentence.split("|")[-1], speaker=speaker, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, LongSentence=True) - silence_frames = int(silenceTime * 44010) - silence_data = np.zeros((silence_frames,), dtype=audio.dtype) - audio_fin.append(audio) - audio_fin.append(silence_data) - - duration = len(audio) / sampling_rate - end_time = start_time + duration + silenceTime - ass_entries.append("Dialogue: 0,{},{},".format(seconds_to_ass_time(start_time), seconds_to_ass_time(end_time)) + "Default,,0,0,0,,{}".format(sentence.replace("|",":"))) - start_time = end_time - except: - pass - wav_filename = os.path.join(outputPath, f'audiobook_part_{group_index}.wav') - ass_filename = os.path.join(outputPath, f'audiobook_part_{group_index}.ass') - - write(wav_filename, sampling_rate, np.concatenate(audio_fin)) - - with open(ass_filename, 'w', encoding='utf-8') as f: - f.write(ass_header + '\n'.join(ass_entries)) - return (hps.data.sampling_rate, np.concatenate(audio_fin)) -def extract_text_from_epub(file_path): - book = epub.read_epub(file_path) - content = [] - for item in book.items: - if isinstance(item, epub.EpubHtml): - soup = BeautifulSoup(item.content, 'html.parser') - content.append(soup.get_text()) - return '\n'.join(content) - -def extract_text_from_pdf(file_path): - with open(file_path, 'rb') as file: - reader = PdfReader(file) - content = [page.extract_text() for page in reader.pages] - return '\n'.join(content) - -def extract_text_from_game2(data): - current_content = [] - - def _extract(data, current_data=None): - nonlocal current_content - - if current_data is None: - current_data = {} - - if isinstance(data, dict): - if 'name' in data and 'body' in data: - current_name = data['name'] - current_body = data['body'].replace('\n', '') - current_content.append(f"{current_name}|{current_body}") - - for key, value in data.items(): - _extract(value, dict(current_data)) - - elif isinstance(data, list): - for item in data: - _extract(item, dict(current_data)) - - _extract(data) - return '\n'.join(current_content) - -def extract_text_from_file(inputFile): - file_extension = os.path.splitext(inputFile)[1].lower() - - if file_extension == ".epub": - return extract_text_from_epub(inputFile) - elif file_extension == ".pdf": - return extract_text_from_pdf(inputFile) - elif file_extension == ".txt": - with open(inputFile, 'r', encoding='utf-8') as f: - return f.read() - elif file_extension == ".asset": - with open(inputFile, 'r', encoding='utf-8') as f: - content = json.load(f) - return extract_text_from_game2(content) if extract_text_from_game2(content) != '' else extract_text_from_game2(content) - else: - raise ValueError(f"Unsupported file format: {file_extension}") - -def 
audiobook(inputFile, groupsize, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale,spealerList,silenceTime): - directory_path = "books" - output_path = "books/audiobook_part_1.wav" - - if os.path.exists(directory_path): - shutil.rmtree(directory_path) - - os.makedirs(directory_path) - text = extract_text_from_file(inputFile.name) - sentences = split_into_sentences(text) - GROUP_SIZE = groupsize - for i in range(0, len(sentences), GROUP_SIZE): - group = sentences[i:i+GROUP_SIZE] - if spealerList == "": - spealerList = "无" - result = generate_audio_and_srt_for_group(group,directory_path, i//GROUP_SIZE + 1, 44100, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale,spealerList,silenceTime) - if not torch.cuda.is_available(): - return result - return result - -def loadmodel(model): - _ = net_g.eval() - _ = utils.load_checkpoint(model, net_g, None, skip_optimizer=True) - return "success" - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "-m", "--model", default="./logs/BangDream/G_45000.pth", help="path of your model" - ) - parser.add_argument( - "-c", - "--config", - default="configs/config.json", - help="path of your config file", - ) - parser.add_argument( - "--share", default=True, help="make link public", action="store_true" - ) - parser.add_argument( - "-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log" - ) - - args = parser.parse_args() - if args.debug: - logger.info("Enable DEBUG-LEVEL log") - logging.basicConfig(level=logging.DEBUG) - device = ( - "cuda:0" - if torch.cuda.is_available() - else ( - "mps" - if sys.platform == "darwin" and torch.backends.mps.is_available() - else "cpu" - ) - ) - hps = utils.get_hparams_from_file(args.config) - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model, - ).to(device) - loadmodel(args.model) - speaker_ids = hps.data.spk2id - speakers = list(speaker_ids.keys()) - languages = ["ZH", "JP"] - examples = [ - ["filelist/Scenarioband6-018.asset", 500, "つくし", "ましろ|真白\n七深|七深\n透子|透子\nつくし|筑紫\n瑠唯|瑠唯\nそよ|素世\n祥子|祥子", "扩展功能"], - ] - modelPaths = [] - for dirpath, dirnames, filenames in os.walk("./logs/BangDream/"): - for filename in filenames: - modelPaths.append(os.path.join(dirpath, filename)) - with gr.Blocks() as app: - gr.Markdown( - f"少歌邦邦全员TTS,使用本模型请严格遵守法律法规!\n 发布二创作品请注明项目和本模型作者B站@Mahiroshi及项目链接\n从 我的博客站点 查看使用说明" - ) - for band in BandList: - with gr.TabItem(band): - for name in BandList[band]: - with gr.TabItem(name): - with gr.Row(): - with gr.Column(): - with gr.Row(): - gr.Markdown( - '
' - f'' - '
' - ) - length_scale = gr.Slider( - minimum=0.1, maximum=2, value=1, step=0.01, label="语速调节" - ) - with gr.Accordion(label="切换模型(合成中文建议切换为早期模型)", open=False): - modelstrs = gr.Dropdown(label = "模型", choices = modelPaths, value = modelPaths[0], type = "value") - btnMod = gr.Button("载入模型") - statusa = gr.TextArea() - btnMod.click(loadmodel, inputs=[modelstrs], outputs = [statusa]) - with gr.Column(): - text = gr.TextArea( - label="输入纯日语或者中文", - placeholder="输入纯日语或者中文", - value="有个人躺在地上,哀嚎......\n有个人睡着了,睡在盒子里。\n我要把它打开,看看他的梦是什么。", - ) - btn = gr.Button("点击生成", variant="primary") - audio_output = gr.Audio(label="Output Audio") - with gr.Accordion(label="其它参数设定", open=False): - sdp_ratio = gr.Slider( - minimum=0, maximum=1, value=0.2, step=0.01, label="SDP/DP混合比" - ) - noise_scale = gr.Slider( - minimum=0.1, maximum=2, value=0.6, step=0.01, label="感情调节" - ) - noise_scale_w = gr.Slider( - minimum=0.1, maximum=2, value=0.8, step=0.01, label="音素长度" - ) - LongSentence = gr.Checkbox(value=True, label="Generate LongSentence") - speaker = gr.Dropdown( - choices=speakers, value=name, label="说话人" - ) - btn.click( - tts_fn, - inputs=[ - text, - speaker, - sdp_ratio, - noise_scale, - noise_scale_w, - length_scale, - LongSentence, - ], - outputs=[audio_output], - ) - for i in examples: - with gr.Tab(i[-1]): - with gr.Row(): - with gr.Column(): - gr.Markdown( - f"从 我的博客站点 查看自制galgame使用说明\n" - ) - inputFile = gr.inputs.File(label="上传txt(可设置角色对应表)、epub或mobi文件") - groupSize = gr.Slider( - minimum=10, maximum=1000,value = i[1], step=1, label="当个音频文件包含的最大字数" - ) - silenceTime = gr.Slider( - minimum=0, maximum=1, value=0.5, step=0.1, label="句子的间隔" - ) - spealerList = gr.TextArea( - label="角色对应表", - placeholder="左边是你想要在每一句话合成中用到的speaker(见角色清单)右边是你上传文本时分隔符左边设置的说话人:{ChoseSpeakerFromConfigList1}|{SeakerInUploadText1}\n{ChoseSpeakerFromConfigList2}|{SeakerInUploadText2}\n{ChoseSpeakerFromConfigList3}|{SeakerInUploadText3}\n", - value = i[3], - ) - speaker = gr.Dropdown( - choices=speakers, value = i[2], label="选择默认说话人" - ) - with gr.Column(): - sdp_ratio = gr.Slider( - minimum=0, maximum=1, value=0.2, step=0.01, label="SDP/DP混合比" - ) - noise_scale = gr.Slider( - minimum=0.1, maximum=2, value=0.6, step=0.01, label="感情调节" - ) - noise_scale_w = gr.Slider( - minimum=0.1, maximum=2, value=0.8, step=0.01, label="音素长度" - ) - length_scale = gr.Slider( - minimum=0.1, maximum=2, value=1, step=0.01, label="生成长度" - ) - LastAudioOutput = gr.Audio(label="当用cuda在本地运行时才能在book文件夹下浏览全部合成内容") - btn2 = gr.Button("点击生成", variant="primary") - btn2.click( - audiobook, - inputs=[ - inputFile, - groupSize, - speaker, - sdp_ratio, - noise_scale, - noise_scale_w, - length_scale, - spealerList, - silenceTime - ], - outputs=[LastAudioOutput], - ) -app.launch() diff --git a/spaces/Manjushri/MusicGen/audiocraft/modules/__init__.py b/spaces/Manjushri/MusicGen/audiocraft/modules/__init__.py deleted file mode 100644 index 81ba30f6466ff91b90490a4fb92f7d3d0d00144d..0000000000000000000000000000000000000000 --- a/spaces/Manjushri/MusicGen/audiocraft/modules/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -# flake8: noqa -from .conv import ( - NormConv1d, - NormConv2d, - NormConvTranspose1d, - NormConvTranspose2d, - StreamableConv1d, - StreamableConvTranspose1d, - pad_for_conv1d, - pad1d, - unpad1d, -) -from .lstm import StreamableLSTM -from .seanet import SEANetEncoder, SEANetDecoder diff --git a/spaces/MarcCote/TextWorldExpress/README.md b/spaces/MarcCote/TextWorldExpress/README.md deleted file mode 100644 index 670752a6e5b27c5a5f60d46d59f7b86a10e9dfc0..0000000000000000000000000000000000000000 --- a/spaces/MarcCote/TextWorldExpress/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: TextWorldExpress -emoji: 🤖📚⏩ -colorFrom: green -colorTo: indigo -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/data-template.js b/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/data-template.js deleted file mode 100644 index 68a51392fb7d2458487eae2a00a3ed03c1e7153a..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/data-template.js +++ /dev/null @@ -1,3 +0,0 @@ -var dataBuffer = `$$DATA$$`; - -start(dataBuffer); \ No newline at end of file diff --git a/spaces/MathysL/AutoGPT4/autogpt/json_utils/json_fix_general.py b/spaces/MathysL/AutoGPT4/autogpt/json_utils/json_fix_general.py deleted file mode 100644 index 7010fa3b9c1909de0e5a7f6ec13ca8aa418fe6c7..0000000000000000000000000000000000000000 --- a/spaces/MathysL/AutoGPT4/autogpt/json_utils/json_fix_general.py +++ /dev/null @@ -1,124 +0,0 @@ -"""This module contains functions to fix JSON strings using general programmatic approaches, suitable for addressing -common JSON formatting issues.""" -from __future__ import annotations - -import contextlib -import json -import re -from typing import Optional - -from autogpt.config import Config -from autogpt.json_utils.utilities import extract_char_position - -CFG = Config() - - -def fix_invalid_escape(json_to_load: str, error_message: str) -> str: - """Fix invalid escape sequences in JSON strings. - - Args: - json_to_load (str): The JSON string. - error_message (str): The error message from the JSONDecodeError - exception. - - Returns: - str: The JSON string with invalid escape sequences fixed. - """ - while error_message.startswith("Invalid \\escape"): - bad_escape_location = extract_char_position(error_message) - json_to_load = ( - json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1 :] - ) - try: - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - fix invalid escape", e) - error_message = str(e) - return json_to_load - - -def balance_braces(json_string: str) -> Optional[str]: - """ - Balance the braces in a JSON string. - - Args: - json_string (str): The JSON string. - - Returns: - str: The JSON string with braces balanced. - """ - - open_braces_count = json_string.count("{") - close_braces_count = json_string.count("}") - - while open_braces_count > close_braces_count: - json_string += "}" - close_braces_count += 1 - - while close_braces_count > open_braces_count: - json_string = json_string.rstrip("}") - close_braces_count -= 1 - - with contextlib.suppress(json.JSONDecodeError): - json.loads(json_string) - return json_string - - -def add_quotes_to_property_names(json_string: str) -> str: - """ - Add quotes to property names in a JSON string. 
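The JSON-repair helpers above (`fix_invalid_escape`, `balance_braces`, and the quote-adding routine whose docstring continues below) all follow the same best-effort pattern: apply a targeted string fix, then let `json.loads` decide whether the result is valid. A self-contained sketch of that pattern using only the standard library; `repair_json` and the sample string are illustrative stand-ins, not the deleted `correct_json`:

```python
# Self-contained sketch of the repair pattern above: quote bare property names,
# balance the braces, then let json.loads validate the result.
import json
import re

def repair_json(candidate: str) -> str:
    candidate = re.sub(r"(\w+):", r'"\1":', candidate)   # {foo: 1} -> {"foo": 1}
    missing = candidate.count("{") - candidate.count("}")
    if missing > 0:
        candidate += "}" * missing                        # close unterminated objects
    return candidate

broken = '{thoughts: {text: "hello", reasoning: "demo"'
print(json.loads(repair_json(broken)))
# -> {'thoughts': {'text': 'hello', 'reasoning': 'demo'}}
```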
- - Args: - json_string (str): The JSON string. - - Returns: - str: The JSON string with quotes added to property names. - """ - - def replace_func(match: re.Match) -> str: - return f'"{match[1]}":' - - property_name_pattern = re.compile(r"(\w+):") - corrected_json_string = property_name_pattern.sub(replace_func, json_string) - - try: - json.loads(corrected_json_string) - return corrected_json_string - except json.JSONDecodeError as e: - raise e - - -def correct_json(json_to_load: str) -> str: - """ - Correct common JSON errors. - Args: - json_to_load (str): The JSON string. - """ - - try: - if CFG.debug_mode: - print("json", json_to_load) - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error", e) - error_message = str(e) - if error_message.startswith("Invalid \\escape"): - json_to_load = fix_invalid_escape(json_to_load, error_message) - if error_message.startswith( - "Expecting property name enclosed in double quotes" - ): - json_to_load = add_quotes_to_property_names(json_to_load) - try: - json.loads(json_to_load) - return json_to_load - except json.JSONDecodeError as e: - if CFG.debug_mode: - print("json loads error - add quotes", e) - error_message = str(e) - if balanced_str := balance_braces(json_to_load): - return balanced_str - return json_to_load diff --git a/spaces/MeiJuice/CheckGPT/app.py b/spaces/MeiJuice/CheckGPT/app.py deleted file mode 100644 index 54da86695f8085a61ffddd94dcf5530e2b878463..0000000000000000000000000000000000000000 --- a/spaces/MeiJuice/CheckGPT/app.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import gradio as gr -from transformers import pipeline - -auth_token = os.environ.get("access_token") -pipeline_en = pipeline(task="text-classification", model="MeiJuice/CheckGPT", - use_auth_token=auth_token) -pipeline_zh = pipeline(task="text-classification", model="MeiJuice/CheckGPT-Chinese", - use_auth_token=auth_token) - - -def predict_en(text): - res = pipeline_en(text, truncation=True)[0] - return "ChatGPT" if res['label'] == "LABEL_1" else "human", res['score'] - - -def predict_zh(text): - res = pipeline_zh(text, truncation=True, max_length=512)[0] - return "ChatGPT" if res['label'] == "LABEL_1" else "human", res['score'] - - -with gr.Blocks() as demo: - with gr.Tab("English"): - gr.Markdown(""" - Note: Providing more text to the `Text` box can make the prediction more accurate! - """) - t1 = gr.Textbox(lines=5, label='Text', - value="No one can call back yesterday, Yesterday will not be called again.") - button1 = gr.Button("Predict!") - label1 = gr.Textbox(lines=1, label='Predicted Label') - score1 = gr.Textbox(lines=1, label='Prob') - with gr.Tab("中文版"): - gr.Markdown(""" - 注意: 在`文本`栏中输入更多的文本,可以让预测更准确哦! - """) - t2 = gr.Textbox(lines=5, label='文本', - value="联邦学习(Federated learning)是在进行分布式机器学习的过程中,各参与方可借助其他参与方数据进行联合建模和使用模型。参与各方无需传递和共享原始数据资源,同时保护模型参数,即在数据不出本地的情况下,进行数据联合训练、联合应用,建立合法合规的机器学习模型,成为一种解决合作中数据隐私与数据共享矛盾的新路径,FL本质上承诺多方通过交换梯度而不是原始数据来联合训练模型。") - button2 = gr.Button("预测!") - label2 = gr.Textbox(lines=1, label='预测结果 ') - score2 = gr.Textbox(lines=1, label='模型概率') - - button1.click(predict_en, inputs=[t1], outputs=[label1, score1], api_name='predict_en') - button2.click(predict_zh, inputs=[t2], outputs=[label2, score2], api_name='predict_zh') - - gr.Markdown(""" -
- """) - - -demo.launch() \ No newline at end of file diff --git a/spaces/MingGatsby/Grounding_DINO_demo/groundingdino/models/__init__.py b/spaces/MingGatsby/Grounding_DINO_demo/groundingdino/models/__init__.py deleted file mode 100644 index e3413961d1d184b99835eb1e919b052d70298bc6..0000000000000000000000000000000000000000 --- a/spaces/MingGatsby/Grounding_DINO_demo/groundingdino/models/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from .GroundingDINO import build_groundingdino - - -def build_model(args): - # we use register to maintain models from catdet6 on. - from .registry import MODULE_BUILD_FUNCS - - assert args.modelname in MODULE_BUILD_FUNCS._module_dict - build_func = MODULE_BUILD_FUNCS.get(args.modelname) - model = build_func(args) - return model diff --git a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/ddim.py b/spaces/MirageML/sjc/sd1/ldm/models/diffusion/ddim.py deleted file mode 100644 index fb31215db5c3f3f703f15987d7eee6a179c9f7ec..0000000000000000000000000000000000000000 --- a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/ddim.py +++ /dev/null @@ -1,241 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \ - extract_into_tensor - - -class DDIMSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for DDIM sampling is {size}, eta {eta}') - - samples, intermediates = self.ddim_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - ) - return samples, intermediates - - @torch.no_grad() - def ddim_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None,): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='DDIM Sampler', 
total=total_steps) - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. - mask) * img - - outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - img, pred_x0 = outs - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None): - b, *_, device = *x.shape, x.device - - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - @torch.no_grad() - def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): - # fast, but does not allow for exact reconstruction - # t serves as an index to gather the correct alphas - if use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas - - if noise is None: - noise = torch.randn_like(x0) - return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + - extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) - - @torch.no_grad() - def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, - use_original_steps=False): - - timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps - timesteps = timesteps[:t_start] - - time_range = np.flip(timesteps) - total_steps = timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='Decoding image', total=total_steps) - x_dec = x_latent - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) - x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - return x_dec \ No newline at end of file diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/datasets/iiit5k.py b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/datasets/iiit5k.py deleted file mode 100644 index 11d1183955e893585323321ca0a23bb655074715..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/_base_/datasets/iiit5k.py +++ /dev/null @@ -1,14 +0,0 @@ -iiit5k_textrecog_data_root = '../data/common_benchmarks/IIIT5K' - -iiit5k_textrecog_train = dict( - type='OCRDataset', - data_root=iiit5k_textrecog_data_root, - ann_file='textrecog_train.json', - pipeline=None) - -iiit5k_textrecog_test = dict( - type='OCRDataset', - data_root=iiit5k_textrecog_data_root, - ann_file='annotation.json', - test_mode=True, - pipeline=None) diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/__init__.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/__init__.py deleted file mode 100644 index e573c71efd65c3c94fe7e10c2031bae88cb9fc90..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
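The heart of `p_sample_ddim` in the `DDIMSampler` deleted above is the DDIM update: estimate `x_0` from the current latent and the predicted noise, then step toward the previous timestep along the "direction pointing to x_t". A toy NumPy illustration of a single step, with made-up alphas and a random array standing in for the model's noise prediction (so it runs without any checkpoint):

```python
# Toy single-step illustration of the update inside p_sample_ddim above. The
# alpha values and the "eps" array are made up; a real sampler gets eps from the
# diffusion model and the alphas from its noise schedule.
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((1, 4, 8, 8))      # current latent x_t
eps = rng.standard_normal(x.shape)         # stand-in for the model's noise prediction
a_t, a_prev, eta = 0.5, 0.7, 0.0           # alphas_cumprod at t and t-1; eta=0 -> deterministic DDIM

sigma_t = eta * np.sqrt((1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev))
pred_x0 = (x - np.sqrt(1.0 - a_t) * eps) / np.sqrt(a_t)   # current estimate of x_0
dir_xt = np.sqrt(1.0 - a_prev - sigma_t**2) * eps         # direction pointing to x_t
x_prev = np.sqrt(a_prev) * pred_x0 + dir_xt + sigma_t * rng.standard_normal(x.shape)

print(x_prev.shape)                        # (1, 4, 8, 8)
```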
-from .backbones import * # NOQA -from .data_preprocessors import * # NOQA -from .decoders import * # NOQA -from .encoders import * # NOQA -from .layers import * # NOQA -from .module_losses import * # NOQA -from .plugins import * # NOQA -from .postprocessors import * # NOQA -from .preprocessors import * # NOQA -from .recognizers import * # NOQA diff --git a/spaces/MrAI-Rohan/three-dog-breeds-detector/app.py b/spaces/MrAI-Rohan/three-dog-breeds-detector/app.py deleted file mode 100644 index be1ff1c2b7036e45a7a5e48f73ad62454d334d16..0000000000000000000000000000000000000000 --- a/spaces/MrAI-Rohan/three-dog-breeds-detector/app.py +++ /dev/null @@ -1,16 +0,0 @@ -import gradio as gr -from fastai import * -from fastai.vision.all import * - - -learn_inf = load_learner("export.pkl") - -def predict_breed(img): - - pred, pred_idx, probs = learn_inf.predict(img) - return f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}' - -gr.inputs.Image(tool=False, optional=False) -webpage = gr.Interface(fn=predict_breed, inputs=gr.inputs.Image(tool=False, optional=False), outputs="text", title="Dog Breed Detector", live=True, - description="It detects wether the dog in the image is German Shepherd, Siberian Husky or Labrador.", examples=[["example1.jpg"], ["example2.jpg"], ["example3.jpg"]]) -webpage.launch() \ No newline at end of file diff --git a/spaces/Navpreet/rabbit3/Rabbit.web.py b/spaces/Navpreet/rabbit3/Rabbit.web.py deleted file mode 100644 index 06966b79539fc59902fdc0e59e44ecb831ac5266..0000000000000000000000000000000000000000 --- a/spaces/Navpreet/rabbit3/Rabbit.web.py +++ /dev/null @@ -1,491 +0,0 @@ -import streamlit as st -from firebase import Firebase -from datetime import datetime -from streamlit_option_menu import option_menu -from PIL import Image -import requests -import streamlit.components.v1 as components -import random -import json -from io import BytesIO -import feedparser -import urllib.request - - - - -#streamlit-1.16.0 - - -im = Image.open("icons8-rabbit-100.png") -st.set_page_config( - page_title="Rabbit.web", - page_icon=im - -) - - -#https://i.gifer.com/Cal.gif - - - - -hide_streamlit_style = """ - - """ -st.markdown(hide_streamlit_style, unsafe_allow_html=True) - - -firebaseConfig = { - "apiKey": "AIzaSyCHnlRFW1_RTgZVVga8E-Rj4g7noddYzXA", - "authDomain": "rabbit1-bd232.firebaseapp.com", - "databaseURL": "https://rabbit1-bd232-default-rtdb.firebaseio.com", - "projectId": "rabbit1-bd232", - "storageBucket": "rabbit1-bd232.appspot.com", - "messagingSenderId": "291333251174", - "appId": "1:291333251174:web:6daeb9908880347a6ecda7", - "measurementId": "G-H1NRRJQRHT" -} - -firebase=Firebase(firebaseConfig) -auth=firebase.auth() - -data=firebase.database() -storage=firebase.storage() - - - -query_params = { - "orderBy": "\"timestamp\"", - "limitToLast": 1 -} -query_string = "?" + "&".join([f"{key}={value}" for key, value in query_params.items()]) - -latest_post = data.child("Posts").get(query_string).val() - - - - - - - - - - - -st.markdown("
centered
",unsafe_allow_html=True) -labela=("

Rabbit.web

") -st.markdown(labela,unsafe_allow_html=True) -streamlit_style = """ - - """ -st.markdown(streamlit_style, unsafe_allow_html=True) -streamlitstyle = """ - - """ - - - -st.sidebar.markdown(streamlitstyle, unsafe_allow_html=True) -st.sidebar.markdown("
centered
",unsafe_allow_html=True) - -placeholder = st.empty() -with placeholder.container(): - label=("

Welcome to Rabbit.web

") - st.markdown(label,unsafe_allow_html=True) - label=("
At Rabbit.web you can do the following things:
") - st.markdown(label,unsafe_allow_html=True) - labelc=("
~You can share your thoughts
") - st.markdown(labelc,unsafe_allow_html=True) - - - labeld=("
~You can see the posts from people
") - st.markdown(labeld,unsafe_allow_html=True) - labeld=("
~You can check the latest news
") - st.markdown(labeld,unsafe_allow_html=True) - - - - - -label=("

Rabbit.web

") -st.sidebar.markdown(label,unsafe_allow_html=True) - -labelb=("

A perfect place to chat with your friends

") -st.sidebar.markdown(labelb,unsafe_allow_html=True) - -choice=st.sidebar.selectbox("Sign in to your account or create an account :",["sign in","create an account"]) - - - -email=st.sidebar.text_input("",placeholder="Hello please enter you email") -passw=st.sidebar.text_input("",placeholder="Hello please enter your password",type="password") - - - -if choice=="create an account": - handle=st.sidebar.text_input("",placeholder="Hello please enter your name") - subbt=st.sidebar.button("Create an new account") - - if subbt: - placeholder.empty() - user=auth.create_user_with_email_and_password(email,passw) - st.success("Your Rabbit.web account has created successfully !") - - user=auth.sign_in_with_email_and_password(email,passw) - data.child(user["localId"]).child("Handle").set(handle) - data.child(user["localId"]).child("ID").set(user["localId"]) - st.info("You can now log in") - - - - -if choice=="sign in": - - signin=st.sidebar.checkbox("sign in") - - - if signin: - placeholder.empty() - user=auth.sign_in_with_email_and_password(email,passw) - - - #"Follower's" "list-task" - nav = option_menu(menu_title=None, options=["Home", "Friend's","New's", "Setting's","About us"],icons=["house","person","list-task", "gear","info"],menu_icon="cast",default_index=2,orientation="vertical",styles={ - "container": {"padding": "0!important", "background-color": "#1c1c1c"}, - "icon": {"color": "lightblue", "font-size": "15px"}, - "nav-link": {"text-align":"left", "margin":"1px", "--hover-color": "#1c1c1c"}, - "nav-link-selected": {"background-color": "#228BE6","color":"#1c1c1c"},}) - - if nav =="Home": - - - st.write(f"#### Share your thought's/post's :") - post=st.text_input("",placeholder="share your thought with your friend's",max_chars=250) - add_post=st.button("Share your thought") - - - - - - - - - - if add_post: - now=datetime.now() - dt=now.strftime("%d / %m / %y") - dtt=now.strftime("%I:%M %p") - - post="Post: "+post+ ";"+" Posted on:"+ dt +" at "+dtt - results=data.child(user["localId"]).child("Posts").push(post) - st.balloons() - - - # st.write("Upload an image") - - # caption = st.text_input("",placeholder="Add a caption to your image") - # expan=st.expander("Upload an image") - - # with expan: - # image = st.file_uploader("", type=["jpg", "jpeg", "png","mp3"]) - - # if image is None: - # st.warning("Please select an image") - #upbta=st.button("Upload the image and caption") - - # if upbta: - # with st.spinner("Uploading image..."): - # storage.child("images/" + image.name).put(image) - # post_data = {"caption": caption,"image_url": storage.child("images/" + image.name).get_url(None) } - # data.child("posts").push(post_data) - #st.success("Post added successfully")''' - components.html("""
""") - col1,col2=st.columns(2) - - with col1: - nimg=data.child(user["localId"]).child("Image").get().val() - if nimg is not None: - v=data.child(user["localId"]).child("Image").get() - for img in v.each(): - imgc=img.val() - - st.markdown(f'', unsafe_allow_html=True) - - - else: - st.info("Oop's no profile pic till now ") - - - - - with col2: - st.title("Post's :") - st.write(f"###### ______________________________________________________") - all_posts=data.child(user['localId']).child("Posts").get() - all_imgs=data.child(user['localId']).child("images").get() - - if all_posts.val() is not None: - for Posts in reversed(all_posts.each()): - - st.success(Posts.val()) - if st.button("🗑 Delete this post ",key=f"Delete_({Posts.key()})"): - data.child(user["localId"]).child("Posts").child(Posts.key()).remove() - - - - - st.write(f"###### ______________________________________________________") - - - - - - - - else: - st.info("Oop's no thought till now") - - - # posts = data.child("posts").get() - #for post in posts.each(): - #caption = post.val()["caption"] - #image_url = post.val()["image_url"] - - #st.write(caption) - #response = requests.get(image_url) - #img = Image.open(BytesIO(response.content)) - #st.image(img, caption=caption, use_column_width=True) - #components.html("""
""") - - - col3=st.columns(1) - with col1: - st.title("Bio :") - all_bio=data.child(user["localId"]).child("Bio").get() - - if all_bio.val() is not None: - - bio=data.child(user["localId"]).child("Bio").get() - for bio in bio.each(): - bioc=bio.val() - st.info(bioc) - else: - st.info("Oop's no Bio till now") - - - elif nav =="Setting's": - nimg=data.child(user["localId"]).child("Image").get().val() - if nimg is not None: - Image=data.child(user["localId"]).child("Image").get() - for img in Image.each(): - imgc=img.val() - - st.markdown(f'', unsafe_allow_html=True) - - expa=st.expander("Change your profile pic") - - with expa: - newimgp=st.file_uploader("Please choose your profile pic") - upbt=st.button("Upload profile pic") - if upbt: - uid=user["localId"] - dataup=storage.child(uid).put(newimgp,user["idToken"]) - aimgdata=storage.child(uid).get_url(dataup["downloadTokens"]) - - data.child(user["localId"]).child("Image").push(aimgdata) - - - st.info("Your profile pic is set successfully") - st.balloons() - else: - st.info("Oop's no profile pic till now") - newimgp=st.file_uploader("Please choose your profile pic") - upbt=st.button("Upload profile pic") - if upbt: - uid=user["localId"] - dataup=storage.child(uid).put(newimgp,user["idToken"]) - aimgdata=storage.child(uid).get_url(dataup["downloadTokens"]) - data.child(user["localId"]).child("Image").push(aimgdata) - - bio=data.child(user["localId"]).child("Bio").get().val() - if bio is not None: - bio=data.child(user["localId"]).child("Bio").get() - for bio in bio.each(): - bioc=bio.val() - st.info(bioc) - - bioin=st.text_area("",placeholder="Enter your Bio to be uploaded eg: name,date of birth etc") - upbtn=st.button("Upload Bio") - - if upbtn: - - - - data.child(user["localId"]).child("Bio").push(bioin) - - st.info("Your Bio is set successfully") - st.balloons() - else: - st.info("Oop's no Bio till now") - bioin=st.text_area("",placeholder="Enter your Bio to be uploaded eg: name,date of birth etc") - upbtn=st.button("Upload Bio") - - if upbtn: - - - data.child(user["localId"]).child("Bio").push(bioin) - - st.info("Your Bio is set successfully") - st.balloons() - - - elif nav=="Friend's": - allu=data.get() - resa=[] - - for ush in allu.each(): - - k=ush.val().get("Handle") - resa.append(k) - - n = len(resa) - - st.title("Search your Friend's :") - cho = st.selectbox('',resa) - pusha = st.button('Show Profile') - - if pusha: - for ush in allu.each(): - k=ush.val().get("Handle") - if k==cho: - l=ush.val().get("ID") - - hn=data.child(l).child("Handle").get().val() - - st.markdown(hn,unsafe_allow_html=True) - components.html("""
""") - col1,col2=st.columns(2) - with col1: - nimg=data.child(l).child("Image").get().val() - if nimg is not None: - v=data.child(l).child("Image").get() - for img in v.each(): - imgc=img.val() - - st.markdown(f'', unsafe_allow_html=True) - - else: - st.info("Oop's no profile pic till now ") - - - - - - - with col2: - st.title("Post's :") - st.write(f"###### ______________________________________________________") - all_posts=data.child(l).child("Posts").get() - if all_posts.val() is not None: - for Posts in reversed(all_posts.each()): - - st.success(Posts.val()) - - - - - - - st.write(f"###### ______________________________________________________") - - else: - st.info("Oop's no thought till now") - - - - col3=st.columns(1) - with col1: - st.title("Bio :") - all_bio=data.child(l).child("Bio").get() - - if all_bio.val() is not None: - bio=data.child(l).child("Bio").get() - for bio in bio.each(): - bioc=bio.val() - st.info(bioc) - else: - st.info("Oop's no Bio till now") - elif nav=="New's": - - st.title("Have a look at today's latest new's :") - components.html("""
""") - news_feed = feedparser.parse("https://rss.nytimes.com/services/xml/rss/nyt/World.xml") - - - for item in news_feed.entries: - try: - st.write(f"## {item.title}") - st.write(item.summary) - image_url = item.media_content[0]["url"] - image_file = urllib.request.urlopen(image_url) - image = Image.open(image_file) - st.write(f"[Read more]({item.link})") - - st.image(image, caption="", use_column_width=True) - st.write(f"###### ______________________________________________________") - except: - st.write("") - - - - #st.info("Sorry this page is currently under construction") - #st.markdown("

⚠️

",unsafe_allow_html=True) - #st.components.v1.html('', width=800, height=800) - - - - else: - st.write("Rabbit.web") - st.write("Created and maintained by Navpreet Singh") - st.write("For help,feedback or suggestion contact our company at rabbitweb854@gmail.com") - st.write("For reporting a user on Rabbit.web contact us at rabbitweb854@gmail.com") - - -#{"rules": { - # ".read": "now < 1682706600000", // 2023-4-29 - # ".write": "now < 1682706600000", // 2023-4-29 - #} -#} - - - - - - - - - - - - diff --git a/spaces/Nephele/bert-vits2-multi-voice/utils.py b/spaces/Nephele/bert-vits2-multi-voice/utils.py deleted file mode 100644 index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000 --- a/spaces/Nephele/bert-vits2-multi-voice/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - elif optimizer is None and not skip_optimizer: - #else: #Disable this line if Infer ,and enable the line upper - new_opt_dict = optimizer.state_dict() - new_opt_dict_params = new_opt_dict['param_groups'][0]['params'] - new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups'] - new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params - optimizer.load_state_dict(new_opt_dict) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - #assert "emb_g" not in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, 
v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL", - help='Model name') - parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.cont = args.cont - return hparams - - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time -- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - 
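The lambda-heavy implementation that follows can be hard to read; as a reading aid, here is a more explicit standalone sketch of the same pruning policy described in the docstring above. The `prune_checkpoints` name, the temporary-directory demo, and the `G_<step>.pth` / `D_<step>.pth` naming are illustrative assumptions, not part of this repository.

```
import os
import tempfile

def prune_checkpoints(path: str, n_keep: int = 2) -> None:
    """Keep only the n_keep most recent G_*/D_* checkpoints, never touching *_0.pth."""
    candidates = [
        f for f in os.listdir(path)
        if f.endswith(".pth") and not f.endswith("_0.pth")
    ]
    for prefix in ("G", "D"):
        group = sorted(
            (f for f in candidates if f.startswith(prefix)),
            key=lambda f: os.path.getmtime(os.path.join(path, f)),
        )
        for f in group[:-n_keep]:  # everything except the n_keep newest
            os.remove(os.path.join(path, f))

# Quick demonstration on throwaway files with explicit, increasing mtimes.
with tempfile.TemporaryDirectory() as d:
    names = ["G_0.pth", "D_0.pth", "G_100.pth", "G_200.pth", "G_300.pth",
             "D_100.pth", "D_200.pth", "D_300.pth"]
    for i, name in enumerate(names):
        p = os.path.join(d, name)
        open(p, "w").close()
        os.utime(p, (1_000_000 + i, 1_000_000 + i))
    prune_checkpoints(d, n_keep=2)
    print(sorted(os.listdir(d)))
    # ['D_0.pth', 'D_200.pth', 'D_300.pth', 'G_0.pth', 'G_200.pth', 'G_300.pth']
```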
import re - ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], - key=sort_key) - to_del = [os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/Next7years/CatHeiHei_v1/share_btn.py b/spaces/Next7years/CatHeiHei_v1/share_btn.py deleted file mode 100644 index 2d6ee6c6392b56194c26a4eee2a124833c3bcca8..0000000000000000000000000000000000000000 --- a/spaces/Next7years/CatHeiHei_v1/share_btn.py +++ /dev/null @@ -1,68 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits 
from Blob - }); - const url = await response.text(); - return url; - } - - const gradioEl = document.querySelector('body > gradio-app'); - const imgEls = gradioEl.querySelectorAll('#gallery img'); - const promptTxt = gradioEl.querySelector('#prompt-text-input input').value; - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - - if(!imgEls.length){ - return; - }; - - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - const files = await Promise.all( - [...imgEls].map(async (imgEl) => { - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const fileName = `diffuse-the-rest-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - }) - ); - - const urls = await Promise.all(files.map((f) => uploadFile(f))); - const htmlImgs = urls.map(url => ``); - const descriptionMd = `
-${htmlImgs.join(`\n`)} -
`; - - const params = new URLSearchParams({ - title: promptTxt, - description: descriptionMd, - }); - - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/runwayml/stable-diffusion-v1-5/discussions/new?${paramsStr}`, '_blank'); - - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" diff --git a/spaces/Nortrom8844/summarize-long-text/summarize.py b/spaces/Nortrom8844/summarize-long-text/summarize.py deleted file mode 100644 index ab803b592794c3aa075b804f325e6c5d39b382ee..0000000000000000000000000000000000000000 --- a/spaces/Nortrom8844/summarize-long-text/summarize.py +++ /dev/null @@ -1,138 +0,0 @@ -import logging - -import torch -from tqdm.auto import tqdm -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer - - -def load_model_and_tokenizer(model_name): - """ - load_model_and_tokenizer - a function that loads a model and tokenizer from huggingface - - Args: - model_name (str): the name of the model to load - Returns: - AutoModelForSeq2SeqLM: the model - AutoTokenizer: the tokenizer - """ - - model = AutoModelForSeq2SeqLM.from_pretrained( - model_name, - # low_cpu_mem_usage=True, - # use_cache=False, - ) - tokenizer = AutoTokenizer.from_pretrained(model_name) - model = model.to("cuda") if torch.cuda.is_available() else model - - logging.info(f"Loaded model {model_name}") - return model, tokenizer - - -def summarize_and_score(ids, mask, model, tokenizer, **kwargs): - """ - summarize_and_score - given a batch of ids and a mask, return a summary and a score for the summary - - Args: - ids (): the batch of ids - mask (): the attention mask for the batch - model (): the model to use for summarization - tokenizer (): the tokenizer to use for summarization - - Returns: - str: the summary of the batch - """ - - ids = ids[None, :] - mask = mask[None, :] - - input_ids = ids.to("cuda") if torch.cuda.is_available() else ids - attention_mask = mask.to("cuda") if torch.cuda.is_available() else mask - - global_attention_mask = torch.zeros_like(attention_mask) - # put global attention on token - global_attention_mask[:, 0] = 1 - - summary_pred_ids = model.generate( - input_ids, - attention_mask=attention_mask, - global_attention_mask=global_attention_mask, - output_scores=True, - return_dict_in_generate=True, - **kwargs, - ) - summary = tokenizer.batch_decode( - summary_pred_ids.sequences, - skip_special_tokens=True, - remove_invalid_values=True, - ) - score = round(summary_pred_ids.sequences_scores.cpu().numpy()[0], 4) - - return summary, score - - -def summarize_via_tokenbatches( - input_text: str, - model, - tokenizer, - batch_length=2048, - batch_stride=16, - **kwargs, -): - """ - summarize_via_tokenbatches - a function that takes a string and returns a summary - - Args: - input_text (str): the text to summarize - model (): the model to use for summarizationz - tokenizer (): the tokenizer to use for summarization - batch_length (int, optional): the length of each batch. Defaults to 2048. - batch_stride (int, optional): the stride of each batch. Defaults to 16. The stride is the number of tokens that overlap between batches. 
- - Returns: - str: the summary - """ - # log all input parameters - if batch_length < 512: - batch_length = 512 - print("WARNING: batch_length was set to 512") - print( - f"input parameters: {kwargs}, batch_length={batch_length}, batch_stride={batch_stride}" - ) - encoded_input = tokenizer( - input_text, - padding="max_length", - truncation=True, - max_length=batch_length, - stride=batch_stride, - return_overflowing_tokens=True, - add_special_tokens=False, - return_tensors="pt", - ) - - in_id_arr, att_arr = encoded_input.input_ids, encoded_input.attention_mask - gen_summaries = [] - - pbar = tqdm(total=len(in_id_arr)) - - for _id, _mask in zip(in_id_arr, att_arr): - - result, score = summarize_and_score( - ids=_id, - mask=_mask, - model=model, - tokenizer=tokenizer, - **kwargs, - ) - score = round(float(score), 4) - _sum = { - "input_tokens": _id, - "summary": result, - "summary_score": score, - } - gen_summaries.append(_sum) - print(f"\t{result[0]}\nScore:\t{score}") - pbar.update() - - pbar.close() - - return gen_summaries diff --git a/spaces/OAOA/DifFace/basicsr/utils/lmdb_util.py b/spaces/OAOA/DifFace/basicsr/utils/lmdb_util.py deleted file mode 100644 index a2b45ce01d5e32ddbf8354d71fd1c8678bede822..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/utils/lmdb_util.py +++ /dev/null @@ -1,199 +0,0 @@ -import cv2 -import lmdb -import sys -from multiprocessing import Pool -from os import path as osp -from tqdm import tqdm - - -def make_lmdb_from_imgs(data_path, - lmdb_path, - img_path_list, - keys, - batch=5000, - compress_level=1, - multiprocessing_read=False, - n_thread=40, - map_size=None): - """Make lmdb from images. - - Contents of lmdb. The file structure is: - - :: - - example.lmdb - ├── data.mdb - ├── lock.mdb - ├── meta_info.txt - - The data.mdb and lock.mdb are standard lmdb files and you can refer to - https://lmdb.readthedocs.io/en/release/ for more details. - - The meta_info.txt is a specified txt file to record the meta information - of our datasets. It will be automatically created when preparing - datasets by our provided dataset tools. - Each line in the txt file records 1)image name (with extension), - 2)image shape, and 3)compression level, separated by a white space. - - For example, the meta information could be: - `000_00000000.png (720,1280,3) 1`, which means: - 1) image name (with extension): 000_00000000.png; - 2) image shape: (720,1280,3); - 3) compression level: 1 - - We use the image name without extension as the lmdb key. - - If `multiprocessing_read` is True, it will read all the images to memory - using multiprocessing. Thus, your server needs to have enough memory. - - Args: - data_path (str): Data path for reading images. - lmdb_path (str): Lmdb save path. - img_path_list (str): Image path list. - keys (str): Used for lmdb keys. - batch (int): After processing batch images, lmdb commits. - Default: 5000. - compress_level (int): Compress level when encoding images. Default: 1. - multiprocessing_read (bool): Whether use multiprocessing to read all - the images to memory. Default: False. - n_thread (int): For multiprocessing. - map_size (int | None): Map size for lmdb env. If None, use the - estimated size from images. 
Default: None - """ - - assert len(img_path_list) == len(keys), ('img_path_list and keys should have the same length, ' - f'but got {len(img_path_list)} and {len(keys)}') - print(f'Create lmdb for {data_path}, save to {lmdb_path}...') - print(f'Totoal images: {len(img_path_list)}') - if not lmdb_path.endswith('.lmdb'): - raise ValueError("lmdb_path must end with '.lmdb'.") - if osp.exists(lmdb_path): - print(f'Folder {lmdb_path} already exists. Exit.') - sys.exit(1) - - if multiprocessing_read: - # read all the images to memory (multiprocessing) - dataset = {} # use dict to keep the order for multiprocessing - shapes = {} - print(f'Read images with multiprocessing, #thread: {n_thread} ...') - pbar = tqdm(total=len(img_path_list), unit='image') - - def callback(arg): - """get the image data and update pbar.""" - key, dataset[key], shapes[key] = arg - pbar.update(1) - pbar.set_description(f'Read {key}') - - pool = Pool(n_thread) - for path, key in zip(img_path_list, keys): - pool.apply_async(read_img_worker, args=(osp.join(data_path, path), key, compress_level), callback=callback) - pool.close() - pool.join() - pbar.close() - print(f'Finish reading {len(img_path_list)} images.') - - # create lmdb environment - if map_size is None: - # obtain data size for one image - img = cv2.imread(osp.join(data_path, img_path_list[0]), cv2.IMREAD_UNCHANGED) - _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]) - data_size_per_img = img_byte.nbytes - print('Data size per image is: ', data_size_per_img) - data_size = data_size_per_img * len(img_path_list) - map_size = data_size * 10 - - env = lmdb.open(lmdb_path, map_size=map_size) - - # write data to lmdb - pbar = tqdm(total=len(img_path_list), unit='chunk') - txn = env.begin(write=True) - txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w') - for idx, (path, key) in enumerate(zip(img_path_list, keys)): - pbar.update(1) - pbar.set_description(f'Write {key}') - key_byte = key.encode('ascii') - if multiprocessing_read: - img_byte = dataset[key] - h, w, c = shapes[key] - else: - _, img_byte, img_shape = read_img_worker(osp.join(data_path, path), key, compress_level) - h, w, c = img_shape - - txn.put(key_byte, img_byte) - # write meta information - txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n') - if idx % batch == 0: - txn.commit() - txn = env.begin(write=True) - pbar.close() - txn.commit() - env.close() - txt_file.close() - print('\nFinish writing lmdb.') - - -def read_img_worker(path, key, compress_level): - """Read image worker. - - Args: - path (str): Image path. - key (str): Image key. - compress_level (int): Compress level when encoding images. - - Returns: - str: Image key. - byte: Image byte. - tuple[int]: Image shape. - """ - - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) - if img.ndim == 2: - h, w = img.shape - c = 1 - else: - h, w, c = img.shape - _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]) - return (key, img_byte, (h, w, c)) - - -class LmdbMaker(): - """LMDB Maker. - - Args: - lmdb_path (str): Lmdb save path. - map_size (int): Map size for lmdb env. Default: 1024 ** 4, 1TB. - batch (int): After processing batch images, lmdb commits. - Default: 5000. - compress_level (int): Compress level when encoding images. Default: 1. 
- """ - - def __init__(self, lmdb_path, map_size=1024**4, batch=5000, compress_level=1): - if not lmdb_path.endswith('.lmdb'): - raise ValueError("lmdb_path must end with '.lmdb'.") - if osp.exists(lmdb_path): - print(f'Folder {lmdb_path} already exists. Exit.') - sys.exit(1) - - self.lmdb_path = lmdb_path - self.batch = batch - self.compress_level = compress_level - self.env = lmdb.open(lmdb_path, map_size=map_size) - self.txn = self.env.begin(write=True) - self.txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w') - self.counter = 0 - - def put(self, img_byte, key, img_shape): - self.counter += 1 - key_byte = key.encode('ascii') - self.txn.put(key_byte, img_byte) - # write meta information - h, w, c = img_shape - self.txt_file.write(f'{key}.png ({h},{w},{c}) {self.compress_level}\n') - if self.counter % self.batch == 0: - self.txn.commit() - self.txn = self.env.begin(write=True) - - def close(self): - self.txn.commit() - self.env.close() - self.txt_file.close() diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/lru_cache_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/lru_cache_dataset.py deleted file mode 100644 index a7854ac1701392754ce5795cafe9c634671aebdf..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/lru_cache_dataset.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from functools import lru_cache - -from . import BaseWrapperDataset - - -class LRUCacheDataset(BaseWrapperDataset): - def __init__(self, dataset, token=None): - super().__init__(dataset) - - @lru_cache(maxsize=8) - def __getitem__(self, index): - return self.dataset[index] - - @lru_cache(maxsize=8) - def collater(self, samples): - return self.dataset.collater(samples) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py deleted file mode 100644 index f869c4b2f8fb15f96a292e39bd293df7898a4fce..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Callable, Optional - -import torch -import torch.nn as nn -from fairseq import utils -from fairseq.modules import LayerNorm, MultiheadAttention -from fairseq.modules.fairseq_dropout import FairseqDropout -from fairseq.modules.quant_noise import quant_noise - - -class TransformerSentenceEncoderLayer(nn.Module): - """ - Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained - models. 
- """ - - def __init__( - self, - embedding_dim: int = 768, - ffn_embedding_dim: int = 3072, - num_attention_heads: int = 8, - dropout: float = 0.1, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - activation_fn: str = "relu", - export: bool = False, - q_noise: float = 0.0, - qn_block_size: int = 8, - init_fn: Callable = None, - ) -> None: - super().__init__() - - if init_fn is not None: - init_fn() - - # Initialize parameters - self.embedding_dim = embedding_dim - self.num_attention_heads = num_attention_heads - self.attention_dropout = attention_dropout - self.q_noise = q_noise - self.qn_block_size = qn_block_size - - self.dropout_module = FairseqDropout( - dropout, module_name=self.__class__.__name__ - ) - self.activation_dropout_module = FairseqDropout( - activation_dropout, module_name=self.__class__.__name__ - ) - - # Initialize blocks - self.activation_fn = utils.get_activation_fn(activation_fn) - self.self_attn = self.build_self_attention( - self.embedding_dim, - num_attention_heads, - dropout=attention_dropout, - self_attention=True, - q_noise=q_noise, - qn_block_size=qn_block_size, - ) - - # layer norm associated with the self attention layer - self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export) - - self.fc1 = self.build_fc1( - self.embedding_dim, - ffn_embedding_dim, - q_noise=q_noise, - qn_block_size=qn_block_size, - ) - self.fc2 = self.build_fc2( - ffn_embedding_dim, - self.embedding_dim, - q_noise=q_noise, - qn_block_size=qn_block_size, - ) - - # layer norm associated with the position wise feed-forward NN - self.final_layer_norm = LayerNorm(self.embedding_dim, export=export) - - def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): - return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) - - def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): - return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) - - def build_self_attention( - self, - embed_dim, - num_attention_heads, - dropout, - self_attention, - q_noise, - qn_block_size, - ): - return MultiheadAttention( - embed_dim, - num_attention_heads, - dropout=dropout, - self_attention=True, - q_noise=q_noise, - qn_block_size=qn_block_size, - ) - - def forward( - self, - x: torch.Tensor, - self_attn_mask: Optional[torch.Tensor] = None, - self_attn_padding_mask: Optional[torch.Tensor] = None, - ): - """ - LayerNorm is applied either before or after the self-attention/ffn - modules similar to the original Transformer implementation. 
- """ - residual = x - x, attn = self.self_attn( - query=x, - key=x, - value=x, - key_padding_mask=self_attn_padding_mask, - need_weights=False, - attn_mask=self_attn_mask, - ) - x = self.dropout_module(x) - x = residual + x - x = self.self_attn_layer_norm(x) - - residual = x - x = self.activation_fn(self.fc1(x)) - x = self.activation_dropout_module(x) - x = self.fc2(x) - x = self.dropout_module(x) - x = residual + x - x = self.final_layer_norm(x) - return x, attn diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/README.md deleted file mode 100644 index 17030bf0fd50bb843a508e13e97ed436eae33287..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/README.md +++ /dev/null @@ -1,83 +0,0 @@ -### 2021 Update: We are merging this example into the [S2T framework](../speech_to_text), which supports more generic speech-to-text tasks (e.g. speech translation) and more flexible data processing pipelines. Please stay tuned. - -# Speech Recognition -`examples/speech_recognition` is implementing ASR task in Fairseq, along with needed features, datasets, models and loss functions to train and infer model described in [Transformers with convolutional context for ASR (Abdelrahman Mohamed et al., 2019)](https://arxiv.org/abs/1904.11660). - - -## Additional dependencies -On top of main fairseq dependencies there are couple more additional requirements. - -1) Please follow the instructions to install [torchaudio](https://github.com/pytorch/audio). This is required to compute audio fbank features. -2) [Sclite](http://www1.icsi.berkeley.edu/Speech/docs/sctk-1.2/sclite.htm#sclite_name_0) is used to measure WER. Sclite can be downloaded and installed from source from sctk package [here](http://www.openslr.org/4/). Training and inference doesn't require Sclite dependency. -3) [sentencepiece](https://github.com/google/sentencepiece) is required in order to create dataset with word-piece targets. - -## Preparing librispeech data -``` -./examples/speech_recognition/datasets/prepare-librispeech.sh $DIR_TO_SAVE_RAW_DATA $DIR_FOR_PREPROCESSED_DATA -``` - -## Training librispeech data -``` -python train.py $DIR_FOR_PREPROCESSED_DATA --save-dir $MODEL_PATH --max-epoch 80 --task speech_recognition --arch vggtransformer_2 --optimizer adadelta --lr 1.0 --adadelta-eps 1e-8 --adadelta-rho 0.95 --clip-norm 10.0 --max-tokens 5000 --log-format json --log-interval 1 --criterion cross_entropy_acc --user-dir examples/speech_recognition/ -``` - -## Inference for librispeech -`$SET` can be `test_clean` or `test_other` -Any checkpoint in `$MODEL_PATH` can be selected. In this example we are working with `checkpoint_last.pt` -``` -python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --max-tokens 25000 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --beam 20 --results-path $RES_DIR --batch-size 40 --gen-subset $SET --user-dir examples/speech_recognition/ -``` - -## Inference for librispeech -``` -sclite -r ${RES_DIR}/ref.word-checkpoint_last.pt-${SET}.txt -h ${RES_DIR}/hypo.word-checkpoint_last.pt-${SET}.txt -i rm -o all stdout > $RES_REPORT -``` -`Sum/Avg` row from first table of the report has WER - -## Using flashlight (previously called [wav2letter](https://github.com/facebookresearch/wav2letter)) components -[flashlight](https://github.com/facebookresearch/flashlight) now has integration with fairseq. 
Currently this includes: - -* AutoSegmentationCriterion (ASG) -* flashlight-style Conv/GLU model -* flashlight's beam search decoder - -To use these, follow the instructions on [this page](https://github.com/facebookresearch/flashlight/tree/master/bindings/python) to install python bindings. - -## Training librispeech data (flashlight style, Conv/GLU + ASG loss) -Training command: -``` -python train.py $DIR_FOR_PREPROCESSED_DATA --save-dir $MODEL_PATH --max-epoch 100 --task speech_recognition --arch w2l_conv_glu_enc --batch-size 4 --optimizer sgd --lr 0.3,0.8 --momentum 0.8 --clip-norm 0.2 --max-tokens 50000 --log-format json --log-interval 100 --num-workers 0 --sentence-avg --criterion asg_loss --asg-transitions-init 5 --max-replabel 2 --linseg-updates 8789 --user-dir examples/speech_recognition -``` - -Note that ASG loss currently doesn't do well with word-pieces. You should prepare a dataset with character targets by setting `nbpe=31` in `prepare-librispeech.sh`. - -## Inference for librispeech (flashlight decoder, n-gram LM) -Inference command: -``` -python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --seed 1 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --gen-subset $SET --results-path $RES_DIR --w2l-decoder kenlm --kenlm-model $KENLM_MODEL_PATH --lexicon $LEXICON_PATH --beam 200 --beam-threshold 15 --lm-weight 1.5 --word-score 1.5 --sil-weight -0.3 --criterion asg_loss --max-replabel 2 --user-dir examples/speech_recognition -``` - -`$KENLM_MODEL_PATH` should be a standard n-gram language model file. `$LEXICON_PATH` should be a flashlight-style lexicon (list of known words and their spellings). For ASG inference, a lexicon line should look like this (note the repetition labels): -``` -doorbell D O 1 R B E L 1 ▁ -``` -For CTC inference with word-pieces, repetition labels are not used and the lexicon should have most common spellings for each word (one can use sentencepiece's `NBestEncodeAsPieces` for this): -``` -doorbell ▁DOOR BE LL -doorbell ▁DOOR B E LL -doorbell ▁DO OR BE LL -doorbell ▁DOOR B EL L -doorbell ▁DOOR BE L L -doorbell ▁DO OR B E LL -doorbell ▁DOOR B E L L -doorbell ▁DO OR B EL L -doorbell ▁DO O R BE LL -doorbell ▁DO OR BE L L -``` -Lowercase vs. uppercase matters: the *word* should match the case of the n-gram language model (i.e. `$KENLM_MODEL_PATH`), while the *spelling* should match the case of the token dictionary (i.e. `$DIR_FOR_PREPROCESSED_DATA/dict.txt`). - -## Inference for librispeech (flashlight decoder, viterbi only) -Inference command: -``` -python examples/speech_recognition/infer.py $DIR_FOR_PREPROCESSED_DATA --task speech_recognition --seed 1 --nbest 1 --path $MODEL_PATH/checkpoint_last.pt --gen-subset $SET --results-path $RES_DIR --w2l-decoder viterbi --criterion asg_loss --max-replabel 2 --user-dir examples/speech_recognition -``` diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/gelu.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/gelu.py deleted file mode 100644 index a2f1ecff4a3ae3de3eb7d327b9163c46b18a15ed..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/gelu.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
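The next deleted module, `gelu.py`, defines an exact GELU (deferring to `torch.nn.functional.gelu`) and a tanh-based approximation (`gelu_accurate`). A quick numeric sanity check of how close the two variants are — a sketch that assumes only that PyTorch is installed; the helper name `gelu_tanh` is illustrative — might look like:

```
import math
import torch
import torch.nn.functional as F

def gelu_tanh(x: torch.Tensor) -> torch.Tensor:
    # Same tanh-based approximation used by gelu_accurate in the module below.
    a = math.sqrt(2 / math.pi)
    return 0.5 * x * (1 + torch.tanh(a * (x + 0.044715 * torch.pow(x, 3))))

x = torch.linspace(-4.0, 4.0, steps=801)
max_gap = (F.gelu(x) - gelu_tanh(x)).abs().max().item()
# The gap is expected to be small, on the order of 1e-4 to 1e-3.
print(f"max |exact - tanh approx| on [-4, 4]: {max_gap:.2e}")
```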
-""" -See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with -the corresponding GitHub repo: https://github.com/hendrycks/GELUs -""" - -import math - -import torch -import torch.nn as nn - - -def gelu_accurate(x): - if not hasattr(gelu_accurate, "_a"): - gelu_accurate._a = math.sqrt(2 / math.pi) - return ( - 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) - ) - - -def gelu(x: torch.Tensor) -> torch.Tensor: - return torch.nn.functional.gelu(x.float()).type_as(x) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py deleted file mode 100644 index 6a825301a452bd935deafdaf78fa2427ca9a469e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Any, Dict, Optional - -import torch.nn as nn -from fairseq.models.fairseq_encoder import EncoderOut -from fairseq.models.transformer import TransformerDecoder, TransformerEncoder -from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer -from torch import Tensor - -from ..modules.latent_layers import LayerSelect - - -class LatentTransformerEncoder(TransformerEncoder): - """Latent depth (https://arxiv.org/abs/2009.13102) implemented in - TransformerEncoder. - """ - - def __init__(self, args, dictionary, embed_tokens, num_logits=1): - self.num_logits = num_logits - self.num_layers = args.encoder_layers - super().__init__(args, dictionary, embed_tokens) - self.layer_select = LayerSelect( - num_layers=self.num_layers, - num_logits=self.num_logits, - soft_select=getattr(args, "soft_select", False), - sampling_tau=getattr(args, "sampling_tau", 5.), - ) - self.lang_idx = None - self.layers = nn.ModuleList( - [self._build_encoder_layer(args, idx) for idx in range(args.encoder_layers)] - ) - - def set_lang_idx(self, lang_idx): - self.lang_idx = lang_idx - - def _build_encoder_layer(self, args, idx=None): - return LatentTransformerEncoderLayer(args, idx, layer_select=self.layer_select) - - def forward(self, src_tokens, src_lengths, return_all_hiddens: bool = False): - self.layer_select.sample(self.lang_idx) - return super().forward(src_tokens, src_lengths, return_all_hiddens) - - -class LatentTransformerEncoderLayer(TransformerEncoderLayer): - """Encoder layer with each (non_residual) block weighted by samples of Bernouli - or Gumbel Signmoid samples. - - Args: - args (argparse.Namespace): parsed command-line arguments from standard - TransformerEncoderLayer. - idx (int): layer index (used to retrieve samples). - layer_select (LayerSelect, optional): instance of LayerSelect module with logits - parameters and sampling method. - """ - - def __init__(self, args, idx, layer_select=None): - super().__init__(args) - self.idx = idx - self.layer_select = layer_select - - def residual_connection(self, x, residual): - return residual + x * self.layer_select(self.idx) - - -class LatentTransformerDecoder(TransformerDecoder): - """Latent depth (https://arxiv.org/abs/2009.13102) implemented in - TransformerDecoder. 
- """ - - def __init__( - self, args, dictionary, embed_tokens, no_encoder_attn=False, num_logits=1 - ): - self.num_logits = num_logits - self.num_layers = args.decoder_layers - super().__init__( - args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn - ) - self.layer_select = LayerSelect( - num_layers=self.num_layers, - num_logits=self.num_logits, - soft_select=getattr(args, "soft_select", False), - sampling_tau=getattr(args, "sampling_tau", 5.), - ) - self.lang_idx = None - self.layers = nn.ModuleList( - [ - self._build_decoder_layer(args, no_encoder_attn, idx) - for idx in range(args.decoder_layers) - ] - ) - - def set_lang_idx(self, lang_idx): - self.lang_idx = lang_idx - - def _build_decoder_layer(self, args, no_encoder_attn=False, idx=None): - return LatentTransformerDecoderLayer( - args, idx, layer_select=self.layer_select, no_encoder_attn=no_encoder_attn - ) - - def forward( - self, - prev_output_tokens, - encoder_out: Optional[EncoderOut] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - features_only: bool = False, - alignment_layer: Optional[int] = None, - alignment_heads: Optional[int] = None, - src_lengths: Optional[Any] = None, - return_all_hiddens: bool = False, - ): - self.layer_select.sample(self.lang_idx) - return super().forward( - prev_output_tokens=prev_output_tokens, - encoder_out=encoder_out, - incremental_state=incremental_state, - features_only=features_only, - alignment_layer=alignment_layer, - src_lengths=src_lengths, - return_all_hiddens=return_all_hiddens, - ) - - -class LatentTransformerDecoderLayer(TransformerDecoderLayer): - """Decoder layer with each (non_residual) block weighted by samples of Bernouli - or Gumbel Signmoid samples. - - Args: - args (argparse.Namespace): parsed command-line arguments from standard - TransformerDecoderLayer. - idx (int): layer index (used to retrieve samples). - layer_select (LayerSelect, optional): instance of LayerSelect module with logits - parameters and sampling method. - no_encoder_attn (bool, optional): whether to attend to encoder outputs - (default: False). - - """ - - def __init__( - self, - args, - idx, - layer_select=None, - no_encoder_attn=False, - add_bias_kv=False, - add_zero_attn=False, - ): - super().__init__(args, no_encoder_attn, add_bias_kv, add_zero_attn) - self.idx = idx - self.layer_select = layer_select - - def residual_connection(self, x, residual): - return residual + x * self.layer_select(self.idx) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/data/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/data/__init__.py deleted file mode 100644 index 47bb6e24ddf25aa4fd5bf0fe9672f89099efb9b4..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/data/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from .asr_dataset import AsrDataset - - -__all__ = [ - "AsrDataset", -] diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/scalar/modules/qconv.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/scalar/modules/qconv.py deleted file mode 100644 index 83788c6f71fd41e61fd115681a22d53ce8b8362c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/scalar/modules/qconv.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn.functional as F -from torch.nn.modules.conv import _ConvNd -from torch.nn.modules.utils import _pair - -from ..ops import emulate_int - - -class IntConv2d(_ConvNd): - """ - Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training. - - Args: - - standard nn.Conv2d parameters - - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - - bits: number of bits - - method: choose among {"tensor", "histogram", "channel"} - - update_step: recompute scale and zero_point every update_steps iterations - - Remarks: - - We use the straight-thgourh estimator so that the gradients - back-propagate nicely in the network, this is implemented with - the detach() trick - - Parameters scale and zero_point are recomputed every update_step - forward pass to reduce the overhead - - At test time, the weights are fully quantized - """ - - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True, - padding_mode="zeros", - p=0, - bits=8, - method="histogram", - update_step=1000, - ): - kernel_size = _pair(kernel_size) - stride = _pair(stride) - padding = _pair(padding) - dilation = _pair(dilation) - super(IntConv2d, self).__init__( - in_channels, - out_channels, - kernel_size, - stride, - padding, - dilation, - False, - _pair(0), - groups, - bias, - padding_mode, - ) - - # quantization parameters - self.p = p - self.bits = bits - self.method = method - self.update_step = update_step - self.counter = 0 - - def _conv_forward(self, input, weight): - if self.padding_mode != "zeros": - return F.conv2d( - F.pad(input, self._padding_repeated_twice, mode=self.padding_mode), - weight, - self.bias, - self.stride, - _pair(0), - self.dilation, - self.groups, - ) - return F.conv2d( - input, - weight, - self.bias, - self.stride, - self.padding, - self.dilation, - self.groups, - ) - - def forward(self, input): - # train with QuantNoise and evaluate the fully quantized network - p = self.p if self.training else 1 - - # update parameters every 100 iterations - if self.counter % self.update_step == 0: - self.scale = None - self.zero_point = None - self.counter += 1 - - # quantize weight - weight_quantized, self.scale, self.zero_point = emulate_int( - self.weight.detach(), - bits=self.bits, - method=self.method, - scale=self.scale, - zero_point=self.zero_point, - ) - - # mask to apply noise - mask = torch.zeros_like(self.weight) - mask.bernoulli_(1 - p) - noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) - - # using straight-through estimator (STE) - clamp_low = -self.scale * self.zero_point - clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point) - weight = ( - torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) - + noise.detach() - ) - - # return output - output = 
self._conv_forward(input, weight) - return output - - def extra_repr(self): - return ( - "in_channels={}, out_channels={}, kernel_size={}, stride={}, " - "padding={}, dilation={}, groups={}, bias={}, quant_noise={}, " - "bits={}, method={}".format( - self.in_channels, - self.out_channels, - self.kernel_size, - self.stride, - self.padding, - self.dilation, - self.groups, - self.bias is not None, - self.p, - self.bits, - self.method, - ) - ) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/online_backtranslation.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/online_backtranslation.py deleted file mode 100644 index 2e27ca237cde1980b2c3ca497e12f458da230c37..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/online_backtranslation.py +++ /dev/null @@ -1,682 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import contextlib -import json -import logging -import math -import os -from argparse import Namespace -from collections import OrderedDict, defaultdict -from pathlib import Path -from typing import Dict, Sequence, Tuple -from argparse import ArgumentError - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -import fairseq -from fairseq import metrics, options, utils -from fairseq.data import ( - FairseqDataset, - LanguagePairDataset, - NoisingDataset, - PrependTokenDataset, - RoundRobinZipDatasets, - TransformEosLangPairDataset, - data_utils, - encoders, -) -from fairseq.sequence_generator import SequenceGenerator -from fairseq.tasks import register_task -from fairseq.tasks.translation import TranslationTask, load_langpair_dataset - -logger = logging.getLogger(__name__) - - -class PiecewiseLinearFn: - """Piecewise linear function. Can be configured with a string.""" - - def __init__(self, pieces: Sequence[Tuple[int, float]]): - assert pieces == sorted( - pieces - ), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}" - - self.pieces = pieces - - def __call__(self, x: int) -> float: - for i, (x_a, y_a) in enumerate(self.pieces[:-1]): - x_b, y_b = self.pieces[i + 1] - if x_a <= x <= x_b: - return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a) - - return self.pieces[-1][1] - - @staticmethod - def from_string(configuration: str) -> "PiecewiseLinearFn": - """ - Parse the configuration of lambda coefficient (for scheduling). 
- x = "3" # lambda will be a constant equal to x - x = "0:1,1000:0" # lambda will start from 1 and linearly decrease - # to 0 during the first 1000 iterations - x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 - # iterations, then will linearly increase to 1 until iteration 2000 - """ - if isinstance(configuration, float): - return PiecewiseLinearFn([(0, configuration)]) - - try: - parts = configuration.split(",") - if len(parts) == 1: - v = float(configuration) - return PiecewiseLinearFn([(0, v)]) - - split = [s.split(":") for s in parts] - pieces = [(int(t), float(v)) for t, v in split] - return PiecewiseLinearFn(pieces) - except Exception: - raise ValueError( - f"Invalid PiecewiseLinearFn configuration: {configuration!r}" - ) - - @staticmethod - def one() -> "PiecewiseLinearFn": - return PiecewiseLinearFn([(0, 1.0)]) - - -@register_task("online_backtranslation") -class OnlineBackTranslationTask(TranslationTask): - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - # fmt: off - # Generic translation args - parser.add_argument('data', help='colon separated path to data directories list, \ - will be iterated upon during epochs in round-robin manner; \ - however, valid and test data are always in the first directory to \ - avoid the need for repeating them in all directories') - parser.add_argument('--mono-langs', metavar='MONO_LANGS', - help='monolingual languages for training') - parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS', - help='language pairs for validation') - parser.add_argument('--load-alignments', action='store_true', - help='load the binarized alignments') - parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL', - help='pad the source on the left') - parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', - help='pad the target on the left') - parser.add_argument('--upsample-primary', default=1, type=int, - help='amount to upsample primary dataset') - try: - parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', - help='max number of tokens in the source sequence') - parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', - help='max number of tokens in the target sequence') - except ArgumentError: - # this might have already been defined. Once we transition this to hydra it should be fine to add it here. 
- pass - parser.add_argument('--truncate-source', action='store_true', default=False, - help='truncate source to max-source-positions') - parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N', - help='if >0, then bucket source and target lengths into N ' - 'buckets and pad accordingly; this is useful on TPUs ' - 'to minimize the number of compilations') - - # Denoising args - parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N', - help='maximum word shuffle distance for denoising autoencoding data generation') - parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N', - help='word dropout probability for denoising autoencoding data generation') - parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N', - help='word blanking probability for denoising autoencoding data generation') - - # Backtranslation args - parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N', - help='back-translation weight') - parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N', - help='denoising auto-encoder weight') - - # Evaluation args - parser.add_argument('--generate-one-by-one', action='store_true', - help='generate one sentence at a time for backtranslation') - - parser.add_argument('--eval-bleu', action='store_true', - help='evaluation with BLEU scores') - parser.add_argument('--eval-bleu-detok', type=str, default="space", - help='detokenize before computing BLEU (e.g., "moses"); ' - 'required if using --eval-bleu; use "space" to ' - 'disable detokenization; see fairseq.data.encoders ' - 'for other options') - parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON', - help='args for building the tokenizer, if needed') - parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False, - help='compute tokenized BLEU instead of sacrebleu') - parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None, - help='remove BPE before computing BLEU') - parser.add_argument('--eval-bleu-args', type=str, metavar='JSON', - help='generation args for BLUE scoring, ' - 'e.g., \'{"beam": 4, "lenpen": 0.6}\'') - parser.add_argument('--eval-bleu-print-samples', action='store_true', - help='print sample generations during validation') - # fmt: on - - def __init__(self, args, common_dict, mono_langs, valid_lang_pairs): - super().__init__(args, common_dict, common_dict) - self.common_dict = common_dict - self.mono_langs = mono_langs - self.valid_lang_pairs = valid_lang_pairs - - self.SHOW_SAMPLES_INTERVAL = 1000 - # Start by showing samples - self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL - self.SHOW_SAMPLES_NUMBER = 5 - self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt) - self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae) - - self.args = args - self.data = utils.split_paths(self.args.data) - if len(self.data) == 1: - shards = list(Path(self.data[0]).glob("shard*")) - if len(shards) > 0: - # keep this as strings, since it can also be a manifold path - old_data = self.data - self.data = [str(shard) for shard in shards] - logging.warning(f"Expanded data directory {old_data} to {self.data}") - - @classmethod - def setup_task(cls, args, **kwargs): - """Setup the task (e.g., load dictionaries). 
- - Args: - args (argparse.Namespace): parsed command-line arguments - """ - args.left_pad_source = options.eval_bool(args.left_pad_source) - args.left_pad_target = options.eval_bool(args.left_pad_target) - - paths = utils.split_paths(args.data) - assert len(paths) > 0 - assert args.mono_langs is not None - - mono_langs = args.mono_langs.split(",") - valid_lang_pairs = args.valid_lang_pairs.split(",") - - # load dictionary - dict_path = os.path.join(paths[0], "dict.txt") - common_dict = cls.load_dictionary(dict_path) - - return cls(args, common_dict, mono_langs, valid_lang_pairs) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset: - """Load a given dataset split. - - Args: - split (str): name of the split (e.g., train, valid, test) - """ - if split == "train": - data_path = self.data[(epoch - 1) % len(self.data)] - dataset = self.load_train_dataset(data_path) - else: - # valid/test should always be the same. - dataset = self.load_translation_dataset(split, self.data[0]) - - self.datasets[split] = dataset - return dataset - - def load_train_dataset(self, data_path: str) -> FairseqDataset: - """The training dataset is made of backtranslation dataset and denoising dataset.""" - data = [] - for lang in self.mono_langs: - train_path = os.path.join(data_path, lang, "train") - # TODO: could we do the BT using denoise sample ? - # this would half the data loading work - data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang))) - data.append( - (f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang)) - ) - - return RoundRobinZipDatasets(OrderedDict(data)) - - def _langpair_dataset( - self, src: FairseqDataset, tgt: FairseqDataset - ) -> LanguagePairDataset: - return LanguagePairDataset( - src, - src.sizes, - self.dictionary, - tgt=tgt, - tgt_sizes=tgt.sizes, - tgt_dict=self.dictionary, - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - # TODO: should we shuffle ? we are already sorting batch by sizes so ? - # shuffle=True, - ) - - def _prepend_lang_bos_to_target( - self, dataset: LanguagePairDataset, lang: str - ) -> LanguagePairDataset: - bos = _lang_token_index(self.dictionary, lang) - return TransformEosLangPairDataset( - dataset, - src_eos=self.dictionary.eos(), - new_src_eos=self.dictionary.eos(), - tgt_bos=self.dictionary.eos(), - new_tgt_bos=bos, - ) - - def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset: - """The BT dataset is generated with (tgt, tgt) pairs. - The actual translation to a (generated_src, tgt) pair - is done on the fly during training. 
- """ - mono_dataset = data_utils.load_indexed_dataset( - data_path, self.common_dict, self.args.dataset_impl - ) - assert mono_dataset is not None, f"No dataset found for {lang}" - - mono_dataset_src = PrependTokenDataset( - mono_dataset, _lang_token_index(self.dictionary, lang) - ) - - mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset) - logger.info( - f"mono_lang = {lang} " - f"lang token index = {_lang_token_index(self.dictionary, lang)} " - f"lang token = {_lang_token(lang)}" - ) - - mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang) - return mono_dataset_bt - - def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset: - """Classic denoising dataset""" - dataset = data_utils.load_indexed_dataset( - data_path, self.common_dict, self.args.dataset_impl - ) - noisy_dataset = NoisingDataset( - dataset, - self.dictionary, - seed=1, - max_word_shuffle_distance=self.args.max_word_shuffle_distance, - word_dropout_prob=self.args.word_dropout_prob, - word_blanking_prob=self.args.word_blanking_prob, - ) - noisy_dataset = PrependTokenDataset( - noisy_dataset, _lang_token_index(self.dictionary, lang) - ) - - clean_dataset = data_utils.load_indexed_dataset( - data_path, self.common_dict, self.args.dataset_impl - ) - denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset) - denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang) - return denoising_dataset - - def load_translation_dataset( - self, split: str, data_path: str, combine: bool = False - ): - # only judging with one language pair for the moment, - # since ConcatDataset doesn't work as expected - assert len(self.valid_lang_pairs) == 1, "For now..." - valid_lang_pair = self.valid_lang_pairs[0] - src, tgt = valid_lang_pair.split("-") - - # use the same function than TranslationTask - src_tgt_dt = load_langpair_dataset( - data_path, - split, - src, - self.common_dict, - tgt, - self.common_dict, - combine=combine, - dataset_impl=self.args.dataset_impl, - upsample_primary=self.args.upsample_primary, - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - max_source_positions=self.args.max_source_positions, - max_target_positions=self.args.max_target_positions, - load_alignments=self.args.load_alignments, - truncate_source=self.args.truncate_source, - num_buckets=self.args.num_batch_buckets, - shuffle=(split != "test"), - prepend_bos_src=_lang_token_index(self.dictionary, src), - ) - - src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt) - src_tgt_eos_dt.args = self.args - return src_tgt_eos_dt - - def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): - raise NotImplementedError - - def build_model(self, args): - # torch.autograd.set_detect_anomaly(True) - model = super().build_model(args) - - add_secial_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs) - - self.sequence_generators = {} - for mono_lang in self.mono_langs: - self.sequence_generators[mono_lang] = SequenceGenerator( - [model], - tgt_dict=self.dictionary, - beam_size=1, - max_len_a=1.3, - max_len_b=5, - min_len=5, - # keep 1 to be able to prepend bos - max_len=model.max_decoder_positions() - 1, - ) - - if getattr(args, "eval_bleu", False): - assert getattr(args, "eval_bleu_detok", None) is not None, ( - "--eval-bleu-detok is required if using --eval-bleu; " - "try --eval-bleu-detok=moses (or --eval-bleu-detok=space " - "to disable detokenization, e.g., when using sentencepiece)" - ) - 
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}") - self.tokenizer = encoders.build_tokenizer( - Namespace( - tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args - ) - ) - - gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}") - self.bleu_sequence_generator = self.build_generator( - [model], Namespace(**gen_args) - ) - - return model - - def max_positions(self): - """Return the max sentence length allowed by the task.""" - return (self.args.max_source_positions, self.args.max_target_positions) - - @property - def dictionary(self): - """Return the source :class:`~fairseq.data.Dictionary`.""" - return self.common_dict - - def display_samples_once_in_a_while(self, smp, mono_lang, other_lang): - self._show_samples_ctr += 1 - if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL: - return - self._show_samples_ctr = 0 - - ln = smp["net_input"]["src_tokens"].shape[0] - - logger.info( - f"(r:{self.args.distributed_rank}) : " - f"{other_lang} ---> {mono_lang} " - f"({other_lang} was generated by back-translation.) {ln} samples" - ) - - for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)): - src_tokens = smp["net_input"]["src_tokens"][i] - tgt_tokens = smp["target"][i] - - src_str = self.dictionary.string(src_tokens, "sentencepiece") - tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece") - logger.info( - f"\n{i}\t\t[{other_lang} generated] {src_str}\n" - f"\t\t[{mono_lang} original ] {tgt_str}\n" - f"\t\t[ src tokens] {src_tokens}\n" - ) - - def backtranslate_sample(self, smp, orig_lang, other_lang) -> None: - """ - * WARNING: smp is modified in place. - * At the start of this function, `smp` has the same input and target: - |--------------------------------------------------------| - | smp['net_input']['src_tokens'] | smp['target'] | - | (from data) __en__ hello world | __en__ hello world | - |--------------------------------------------------------| - - * We call generator.generate(smp, bos_token = token("ro")), - and copy the result as input - * At the end, `smp` has the translation to other language. 
- |--------------------------------------------------------| - | smp['net_input']['src_tokens'] | smp['target'] | - | (generated) __ro__ salut lume | __en__ hello world | - |--------------------------------------------------------| - - """ - bos_token = _lang_token_index(self.dictionary, other_lang) - generated = self.sequence_generators[orig_lang].generate( - models=[], sample=smp, bos_token=bos_token - ) - - max_lngth = max([gn[0]["tokens"].size(0) for gn in generated]) - net_input = smp["net_input"] - n_src_tokens = torch.empty( - size=(len(generated), max_lngth + 1), dtype=net_input["src_tokens"].dtype - ) - n_src_lengths = torch.empty( - len(generated), dtype=net_input["src_lengths"].dtype - ) - - for i, gn in enumerate(generated): - tokens = gn[0]["tokens"] - tokens_size = tokens.size(0) - padding_needed = max_lngth - tokens_size - tokens = torch.cat([tokens.new([bos_token]), tokens]) - tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad()) - n_src_tokens[i] = tokens - n_src_lengths[i] = tokens_size + 1 - - device = net_input["src_tokens"].device - # This seems to be important - del net_input["src_tokens"] - del net_input["src_lengths"] - net_input["src_tokens"] = n_src_tokens.to(device) - net_input["src_lengths"] = n_src_lengths.to(device) - - def generate(self, smp, model): - model.eval() - orig_lang = ( - self.dictionary[smp["net_input"]["src_tokens"][0][0]] - .replace(" ", "") - .replace("_", "") - ) - bos_token = smp["net_input"]["prev_output_tokens"][0][0] - with torch.no_grad(): - generated = self.sequence_generators[orig_lang].generate( - models=[model], sample=smp, bos_token=bos_token - ) - return generated - - def get_other_lang(self, lang): - # TODO: allow more complex mapping - if lang != self.mono_langs[0]: - return self.mono_langs[0] - if len(self.mono_langs) == 2: - return self.mono_langs[1] - return self.mono_langs[np.random.randint(1, len(self.mono_langs))] - - def train_step( - self, sample, model, criterion, optimizer, update_num, ignore_grad=False - ): - - model.train() - model.set_num_updates(update_num) - - agg_loss, agg_sample_size = 0.0, 0.0 - agg_logging_output: Dict[str, float] = defaultdict(float) - - dataset_keys = self.datasets["train"].datasets.keys() - - weights = { - "BT": self.lambda_bt(update_num), - "DENOISE": self.lambda_dae(update_num), - } - log_keys = {"BT": "bt_", "DENOISE": "dae_"} - - for dataset_key in dataset_keys: - smp = sample[dataset_key] - mono_lang, task_subtype = dataset_key.split("-") - if weights[task_subtype] == 0: - continue - - if task_subtype == "BT": - with torch.autograd.profiler.record_function("backtranslation"): - model.eval() - # TODO: Could we translate to several language at once ? - # this would allow to share encoder_out and maximize GPU usage. 
- other_lang = self.get_other_lang(mono_lang) - self.backtranslate_sample(smp, mono_lang, other_lang) - self.display_samples_once_in_a_while(smp, mono_lang, other_lang) - model.train() - - # Like in FairseqTask.train_step - with torch.autograd.profiler.record_function("forward"): - loss, sample_size, logging_output = criterion(model, smp) - loss *= weights[task_subtype] - if ignore_grad: - loss *= 0 - with torch.autograd.profiler.record_function("backward"): - optimizer.backward(loss) - - agg_loss += loss.item() - agg_sample_size += sample_size - for k in logging_output: - agg_logging_output[log_keys[task_subtype] + k] += logging_output[k] - agg_logging_output[k] += logging_output[k] - - return agg_loss, agg_sample_size, agg_logging_output - - def get_bos_token_from_sample(self, sample): - net_input = sample["net_input"] - source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item() - source_lang_token = self.dictionary[source_lang_token_id].replace("_", "") - target_lang_token_id = _lang_token_index( - self.dictionary, self.get_other_lang(source_lang_token) - ) - - return target_lang_token_id - - def reduce_metrics(self, logging_outputs, criterion): - super().reduce_metrics(logging_outputs, criterion) - bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs) - if bt_sample_size: - bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs) - bt_loss_sum *= 1 / bt_sample_size / math.log(2) - metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3) - - bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs) - bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs) - bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2) - metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3) - metrics.log_derived( - "bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg) - ) - - dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs) - if dae_sample_size: - dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs) - dae_loss_sum *= 1 / dae_sample_size / math.log(2) - metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3) - - dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs) - dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs) - dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2) - metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3) - metrics.log_derived( - "dae_ppl", - lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg), - ) - - -@torch.no_grad() -def extend_embedding( - emb: nn.Module, new_vocab_size: int, copy_from_token_id: int -) -> None: - old_emb_data = emb.weight.data - (old_vocab_size, dim) = old_emb_data.shape - assert new_vocab_size >= old_vocab_size - - if new_vocab_size > old_vocab_size: - emb.weight.data = torch.zeros((new_vocab_size, dim)) - emb.weight.data[:old_vocab_size, :] = old_emb_data - # initialize new embeddings - emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id] - if hasattr(emb, "num_embeddings"): - emb.num_embeddings = new_vocab_size - if hasattr(emb, "out_features"): - emb.out_features = new_vocab_size - - if getattr(emb, "bias", None) is None: - return - - # Fix the bias. - # Bias shape can be different from the previous vocab size - # if the weight matrix was shared and alread extended but not the bias. 
- (old_vocab_size,) = emb.bias.shape - assert new_vocab_size >= old_vocab_size - if new_vocab_size > old_vocab_size: - old_bias = emb.bias.data - new_bias = torch.zeros( - (new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device - ) - new_bias[:old_vocab_size] = old_bias - emb.bias.data = new_bias - - -def add_secial_tokens_to_dict_and_model( - dictionary: "fairseq.data.Dictionary", - model: nn.Module, - mono_langs: Sequence[str], -) -> None: - embs = model.encoder.embed_tokens - vocab_size, embedding_dim = embs.weight.shape - - # The model may or may not have a '' embedding yet - assert ( - len(dictionary) <= vocab_size <= len(dictionary) + 1 - ), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})" - # TODO: we should reuse the pretrained model dict which already has - dictionary.add_symbol("") - - for lang in mono_langs: - lang_token = _lang_token(lang) - dictionary.add_symbol(lang_token) - logger.info( - f"dictionary: {len(dictionary)} -> {vocab_size} tokens " - f"after adding {len(mono_langs)} lang tokens." - ) - - if len(dictionary) <= vocab_size: - return - - extend_embedding(embs, len(dictionary), dictionary.bos()) - dec_embs = model.decoder.embed_tokens - extend_embedding(dec_embs, len(dictionary), dictionary.bos()) - lm_head = model.decoder.output_projection - extend_embedding(lm_head, len(dictionary), dictionary.bos()) - assert lm_head.weight.shape == (len(dictionary), embedding_dim) - - -def _lang_token(lang: str) -> str: - return f"__{lang}__" - - -def _lang_token_index(dictionary, lang: str) -> int: - return dictionary.index(_lang_token(lang)) - - -@contextlib.contextmanager -def assert_weights_have_changed(model: nn.Module): - def checksum(model: nn.Module) -> float: - return sum(p.sum().item() for p in model.parameters()) - - initial_checksum = checksum(model) - yield model - final_checksum = checksum(model) - logger.info( - f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}" - ) - assert initial_checksum != final_checksum, "Model hasn't changed !" diff --git a/spaces/OpenDILabCommunity/LLMRiddlesChatGPTEN/llmriddles/questions/level4.py b/spaces/OpenDILabCommunity/LLMRiddlesChatGPTEN/llmriddles/questions/level4.py deleted file mode 100644 index eabe8a2d50f4920fe2aa13946466cd65e9b2cd84..0000000000000000000000000000000000000000 --- a/spaces/OpenDILabCommunity/LLMRiddlesChatGPTEN/llmriddles/questions/level4.py +++ /dev/null @@ -1,104 +0,0 @@ -import re - -from .question import register_question - - -def check_if_is_number(text: str): - try: - int(text) - return True - except ValueError: - return False - - -def get_all_numbers_in_a_sentence(text: str): - return [int(i) for i in re.findall(r'\d+', text)] - - -# CN_TEXT_1 = """ -# 第四章第一题,请构造一个问题A,它的回复是不同于A的B,然后以B提问能再次得到A。 - -# 请在下面的输入框内填写你构造并点击按钮提交。 -# """ - -# EN_TEXT_1 = """ -# For the first question in chapter 4, please make a question A whose answer is B that is different from A, and then ask B to get A again. 
- -# Please enter your query below and click the submit button -# """ - - -# def _checker_1(question_text: str, user_text: str, answer_text: str, lang: str): -# _ = question_text, lang -# answer_text = answer_text.strip() -# user_text = user_text.strip() -# pass - -# register_question({ -# 'cn': CN_TEXT_1, -# 'en': EN_TEXT_1, -# }, _checker_1, level=4) - - -# CN_TEXT_2 = """ -# 第四章第二题, - -# 请在下面的输入框内填写你构造并点击按钮提交。 -# """ - -# EN_TEXT_2 = """ -# For the second question in chapter 4, - -# Please enter your query below and click the submit button -# """ - - -# def _checker_2(question_text: str, user_text: str, answer_text: str, lang: str): -# _ = question_text, lang -# answer_text = answer_text.strip() -# user_text = user_text.strip() -# pass - -# register_question({ -# 'cn': CN_TEXT_2, -# 'en': EN_TEXT_2, -# }, _checker_2, level=4) - - -CN_TEXT_3 = """ -第四章第一题(自然之密),请输入一个大于一的正整数作为问题,使回答里包含和它刚好相差1的数。 - -请在下面的输入框内填写你构造并点击按钮提交。 -""" - -EN_TEXT_3 = """ -For the first question in chapter 4, please enter a positive integer greater than one as the question so that the answer contains a number that is exactly 1 different from it. - -Please enter your query below and click the submit button -""" - - -def _checker_3(question_text: str, user_text: str, answer_text: str, lang: str): - _ = question_text, lang - answer_text = answer_text.strip() - user_text = user_text.strip() - if not check_if_is_number(user_text): - return False, "问题应该是一个正整数" if lang == 'cn' else 'Question should be a positive integer.' - elif int(user_text) == 1: - return False, "问题应该是一个大于1的正整数" if lang == 'cn' else 'Question should be a positive integer greater than 1.' - elif int(user_text) - 1 not in get_all_numbers_in_a_sentence(answer_text) or int( - user_text) + 1 not in get_all_numbers_in_a_sentence(answer_text): - return False, "回答中应该包含一个与问题相差1的数字" if lang == 'cn' else 'Answer should contain a number that is exactly 1 different from the question.' - else: - return True, None - - -register_question( - { - 'cn': CN_TEXT_3, - 'en': EN_TEXT_3, - }, - checkers=_checker_3, - name={'cn': '4-3 自然之密', 'en': '4-3'}, - level=4, -) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/docs/notes/benchmarks.md b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/docs/notes/benchmarks.md deleted file mode 100644 index b41588daf3a039b9034e80366c2710e90ba3e056..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/docs/notes/benchmarks.md +++ /dev/null @@ -1,196 +0,0 @@ - -# Benchmarks - -Here we benchmark the training speed of a Mask R-CNN in detectron2, -with some other popular open source Mask R-CNN implementations. - - -### Settings - -* Hardware: 8 NVIDIA V100s with NVLink. -* Software: Python 3.7, CUDA 10.1, cuDNN 7.6.5, PyTorch 1.5, - TensorFlow 1.15.0rc2, Keras 2.2.5, MxNet 1.6.0b20190820. -* Model: an end-to-end R-50-FPN Mask-RCNN model, using the same hyperparameter as the - [Detectron baseline config](https://github.com/facebookresearch/Detectron/blob/master/configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml) - (it does not have scale augmentation). -* Metrics: We use the average throughput in iterations 100-500 to skip GPU warmup time. - Note that for R-CNN-style models, the throughput of a model typically changes during training, because - it depends on the predictions of the model. 
Therefore this metric is not directly comparable with - "train speed" in model zoo, which is the average speed of the entire training run. - - -### Main Results - -```eval_rst -+-------------------------------+--------------------+ -| Implementation | Throughput (img/s) | -+===============================+====================+ -| |D2| |PT| | 62 | -+-------------------------------+--------------------+ -| mmdetection_ |PT| | 53 | -+-------------------------------+--------------------+ -| maskrcnn-benchmark_ |PT| | 53 | -+-------------------------------+--------------------+ -| tensorpack_ |TF| | 50 | -+-------------------------------+--------------------+ -| simpledet_ |mxnet| | 39 | -+-------------------------------+--------------------+ -| Detectron_ |C2| | 19 | -+-------------------------------+--------------------+ -| `matterport/Mask_RCNN`__ |TF| | 14 | -+-------------------------------+--------------------+ - -.. _maskrcnn-benchmark: https://github.com/facebookresearch/maskrcnn-benchmark/ -.. _tensorpack: https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN -.. _mmdetection: https://github.com/open-mmlab/mmdetection/ -.. _simpledet: https://github.com/TuSimple/simpledet/ -.. _Detectron: https://github.com/facebookresearch/Detectron -__ https://github.com/matterport/Mask_RCNN/ - -.. |D2| image:: https://github.com/facebookresearch/detectron2/raw/main/.github/Detectron2-Logo-Horz.svg?sanitize=true - :height: 15pt - :target: https://github.com/facebookresearch/detectron2/ -.. |PT| image:: https://pytorch.org/assets/images/logo-icon.svg - :width: 15pt - :height: 15pt - :target: https://pytorch.org -.. |TF| image:: https://static.nvidiagrid.net/ngc/containers/tensorflow.png - :width: 15pt - :height: 15pt - :target: https://tensorflow.org -.. |mxnet| image:: https://github.com/dmlc/web-data/raw/master/mxnet/image/mxnet_favicon.png - :width: 15pt - :height: 15pt - :target: https://mxnet.apache.org/ -.. |C2| image:: https://caffe2.ai/static/logo.svg - :width: 15pt - :height: 15pt - :target: https://caffe2.ai -``` - - -Details for each implementation: - -* __Detectron2__: with release v0.1.2, run: - ``` - python tools/train_net.py --config-file configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml --num-gpus 8 - ``` - -* __mmdetection__: at commit `b0d845f`, run - ``` - ./tools/dist_train.sh configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py 8 - ``` - -* __maskrcnn-benchmark__: use commit `0ce8f6f` with `sed -i 's/torch.uint8/torch.bool/g' **/*.py; sed -i 's/AT_CHECK/TORCH_CHECK/g' **/*.cu` - to make it compatible with PyTorch 1.5. Then, run training with - ``` - python -m torch.distributed.launch --nproc_per_node=8 tools/train_net.py --config-file configs/e2e_mask_rcnn_R_50_FPN_1x.yaml - ``` - The speed we observed is faster than its model zoo, likely due to different software versions. - -* __tensorpack__: at commit `caafda`, `export TF_CUDNN_USE_AUTOTUNE=0`, then run - ``` - mpirun -np 8 ./train.py --config DATA.BASEDIR=/data/coco TRAINER=horovod BACKBONE.STRIDE_1X1=True TRAIN.STEPS_PER_EPOCH=50 --load ImageNet-R50-AlignPadding.npz - ``` - -* __SimpleDet__: at commit `9187a1`, run - ``` - python detection_train.py --config config/mask_r50v1_fpn_1x.py - ``` - -* __Detectron__: run - ``` - python tools/train_net.py --cfg configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml - ``` - Note that many of its ops run on CPUs, therefore the performance is limited. 
- -* __matterport/Mask_RCNN__: at commit `3deaec`, apply the following diff, `export TF_CUDNN_USE_AUTOTUNE=0`, then run - ``` - python coco.py train --dataset=/data/coco/ --model=imagenet - ``` - Note that many small details in this implementation might be different - from Detectron's standards. - -
- - (diff to make it use the same hyperparameters - click to expand) - - - ```diff - diff --git i/mrcnn/model.py w/mrcnn/model.py - index 62cb2b0..61d7779 100644 - --- i/mrcnn/model.py - +++ w/mrcnn/model.py - @@ -2367,8 +2367,8 @@ class MaskRCNN(): - epochs=epochs, - steps_per_epoch=self.config.STEPS_PER_EPOCH, - callbacks=callbacks, - - validation_data=val_generator, - - validation_steps=self.config.VALIDATION_STEPS, - + #validation_data=val_generator, - + #validation_steps=self.config.VALIDATION_STEPS, - max_queue_size=100, - workers=workers, - use_multiprocessing=True, - diff --git i/mrcnn/parallel_model.py w/mrcnn/parallel_model.py - index d2bf53b..060172a 100644 - --- i/mrcnn/parallel_model.py - +++ w/mrcnn/parallel_model.py - @@ -32,6 +32,7 @@ class ParallelModel(KM.Model): - keras_model: The Keras model to parallelize - gpu_count: Number of GPUs. Must be > 1 - """ - + super().__init__() - self.inner_model = keras_model - self.gpu_count = gpu_count - merged_outputs = self.make_parallel() - diff --git i/samples/coco/coco.py w/samples/coco/coco.py - index 5d172b5..239ed75 100644 - --- i/samples/coco/coco.py - +++ w/samples/coco/coco.py - @@ -81,7 +81,10 @@ class CocoConfig(Config): - IMAGES_PER_GPU = 2 - - # Uncomment to train on 8 GPUs (default is 1) - - # GPU_COUNT = 8 - + GPU_COUNT = 8 - + BACKBONE = "resnet50" - + STEPS_PER_EPOCH = 50 - + TRAIN_ROIS_PER_IMAGE = 512 - - # Number of classes (including background) - NUM_CLASSES = 1 + 80 # COCO has 80 classes - @@ -496,29 +499,10 @@ if __name__ == '__main__': - # *** This training schedule is an example. Update to your needs *** - - # Training - Stage 1 - - print("Training network heads") - model.train(dataset_train, dataset_val, - learning_rate=config.LEARNING_RATE, - epochs=40, - - layers='heads', - - augmentation=augmentation) - - - - # Training - Stage 2 - - # Finetune layers from ResNet stage 4 and up - - print("Fine tune Resnet stage 4 and up") - - model.train(dataset_train, dataset_val, - - learning_rate=config.LEARNING_RATE, - - epochs=120, - - layers='4+', - - augmentation=augmentation) - - - - # Training - Stage 3 - - # Fine tune all layers - - print("Fine tune all layers") - - model.train(dataset_train, dataset_val, - - learning_rate=config.LEARNING_RATE / 10, - - epochs=160, - - layers='all', - + layers='3+', - augmentation=augmentation) - - elif args.command == "evaluate": - ``` - -
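
The throughput figures in the benchmarks document above are the average images per second over iterations 100-500, skipping GPU warmup. A minimal sketch of that calculation, assuming hypothetical `iter_times` (seconds per iteration) and `batch_size` inputs that are not part of detectron2 or any of the benchmarked codebases:

```python
# Minimal sketch of the benchmark metric above: average images/sec over
# iterations 100-500 to skip GPU warmup. `iter_times` (seconds per iteration)
# and `batch_size` are hypothetical inputs, not part of any benchmarked code.
def average_throughput(iter_times, batch_size, start=100, end=500):
    window = iter_times[start:end]           # keep only post-warmup iterations
    total_images = batch_size * len(window)  # images processed in that window
    return total_images / sum(window)        # images per second

# e.g. 2 images/GPU x 8 GPUs = 16 images/iter; ~0.26 s/iter gives ~62 img/s
print(average_throughput([0.26] * 600, batch_size=16))
```

With the Detectron baseline's 2 images per GPU on 8 GPUs (16 images per iteration), roughly 0.26 s per iteration corresponds to the ~62 img/s reported for detectron2 in the table above.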
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/vit.py b/spaces/OpenGVLab/InternGPT/iGPT/models/vit.py deleted file mode 100644 index 9d910a687ef6e74a6d7541e81c93b06aea7fda60..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/vit.py +++ /dev/null @@ -1,300 +0,0 @@ -''' - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. - * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li - * Based on timm code base - * https://github.com/rwightman/pytorch-image-models/tree/master/timm -''' - -import torch -import torch.nn as nn -import torch.nn.functional as F -from functools import partial - -from timm.models.vision_transformer import _cfg, PatchEmbed -from timm.models.registry import register_model -from timm.models.layers import trunc_normal_, DropPath -from timm.models.helpers import named_apply, adapt_input_conv - -from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper - -class Mlp(nn.Module): - """ MLP as used in Vision Transformer, MLP-Mixer and related networks - """ - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class Attention(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim ** -0.5 - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - self.attn_gradients = None - self.attention_map = None - - def save_attn_gradients(self, attn_gradients): - self.attn_gradients = attn_gradients - - def get_attn_gradients(self): - return self.attn_gradients - - def save_attention_map(self, attention_map): - self.attention_map = attention_map - - def get_attention_map(self): - return self.attention_map - - def forward(self, x, register_hook=False): - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - if register_hook: - self.save_attention_map(attn) - attn.register_hook(self.save_attn_gradients) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class Block(nn.Module): - - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False): - super().__init__() - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - # 
NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - if use_grad_checkpointing: - self.attn = checkpoint_wrapper(self.attn) - self.mlp = checkpoint_wrapper(self.mlp) - - def forward(self, x, register_hook=False): - x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class VisionTransformer(nn.Module): - """ Vision Transformer - A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - - https://arxiv.org/abs/2010.11929 - """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, - num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, - use_grad_checkpointing=False, ckpt_layer=0): - """ - Args: - img_size (int, tuple): input image size - patch_size (int, tuple): patch size - in_chans (int): number of input channels - num_classes (int): number of classes for classification head - embed_dim (int): embedding dimension - depth (int): depth of transformer - num_heads (int): number of attention heads - mlp_ratio (int): ratio of mlp hidden dim to embedding dim - qkv_bias (bool): enable bias for qkv if True - qk_scale (float): override default qk scale of head_dim ** -0.5 if set - representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set - drop_rate (float): dropout rate - attn_drop_rate (float): attention dropout rate - drop_path_rate (float): stochastic depth rate - norm_layer: (nn.Module): normalization layer - """ - super().__init__() - self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.patch_embed = PatchEmbed( - img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) - - num_patches = self.patch_embed.num_patches - - self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) - self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) - self.pos_drop = nn.Dropout(p=drop_rate) - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule - self.blocks = nn.ModuleList([ - Block( - dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, - use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer) - ) - for i in range(depth)]) - self.norm = norm_layer(embed_dim) - - trunc_normal_(self.pos_embed, std=.02) - trunc_normal_(self.cls_token, std=.02) - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_embed', 'cls_token'} - - def forward(self, x, register_blk=-1): - B = x.shape[0] - x = self.patch_embed(x) - - cls_tokens = 
self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks - x = torch.cat((cls_tokens, x), dim=1) - - x = x + self.pos_embed[:,:x.size(1),:] - x = self.pos_drop(x) - - for i,blk in enumerate(self.blocks): - x = blk(x, register_blk==i) - x = self.norm(x) - - return x - - @torch.jit.ignore() - def load_pretrained(self, checkpoint_path, prefix=''): - _load_weights(self, checkpoint_path, prefix) - - -@torch.no_grad() -def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): - """ Load weights from .npz checkpoints for official Google Brain Flax implementation - """ - import numpy as np - - def _n2p(w, t=True): - if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: - w = w.flatten() - if t: - if w.ndim == 4: - w = w.transpose([3, 2, 0, 1]) - elif w.ndim == 3: - w = w.transpose([2, 0, 1]) - elif w.ndim == 2: - w = w.transpose([1, 0]) - return torch.from_numpy(w) - - w = np.load(checkpoint_path) - if not prefix and 'opt/target/embedding/kernel' in w: - prefix = 'opt/target/' - - if hasattr(model.patch_embed, 'backbone'): - # hybrid - backbone = model.patch_embed.backbone - stem_only = not hasattr(backbone, 'stem') - stem = backbone if stem_only else backbone.stem - stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) - stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) - stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) - if not stem_only: - for i, stage in enumerate(backbone.stages): - for j, block in enumerate(stage.blocks): - bp = f'{prefix}block{i + 1}/unit{j + 1}/' - for r in range(3): - getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) - getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) - getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) - if block.downsample is not None: - block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) - block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) - block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) - embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) - else: - embed_conv_w = adapt_input_conv( - model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) - model.patch_embed.proj.weight.copy_(embed_conv_w) - model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) - model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) - pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) - if pos_embed_w.shape != model.pos_embed.shape: - pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights - pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) - model.pos_embed.copy_(pos_embed_w) - model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) - model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) - - for i, block in enumerate(model.blocks.children()): - block_prefix = f'{prefix}Transformer/encoderblock_{i}/' - mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' - block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) - block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) - block.attn.qkv.weight.copy_(torch.cat([ - _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) - block.attn.qkv.bias.copy_(torch.cat([ - _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in 
('query', 'key', 'value')])) - block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) - block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) - for r in range(2): - getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) - getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) - block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) - block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) - - -def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): - # interpolate position embedding - embedding_size = pos_embed_checkpoint.shape[-1] - num_patches = visual_encoder.patch_embed.num_patches - num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches - # height (== width) for the checkpoint position embedding - orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) - # height (== width) for the new position embedding - new_size = int(num_patches ** 0.5) - - if orig_size!=new_size: - # class_token and dist_token are kept unchanged - extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] - # only the position tokens are interpolated - pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] - pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) - pos_tokens = torch.nn.functional.interpolate( - pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) - pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) - new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) - print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2)) - - return new_pos_embed - else: - return pos_embed_checkpoint \ No newline at end of file diff --git a/spaces/OpenMind-AI/starchat-playground/README.md b/spaces/OpenMind-AI/starchat-playground/README.md deleted file mode 100644 index a3f814d573451044b052699b5d3fd842790b26bb..0000000000000000000000000000000000000000 --- a/spaces/OpenMind-AI/starchat-playground/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: StarChat Playground -emoji: ⭐️💬 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false -license: mit -duplicated_from: HuggingFaceH4/starchat-playground ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/__init__.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/__init__.py deleted file mode 100644 index 3d3bdd349b9f2ae499a2fcb2ac1d2e3c77befebe..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .drop import DropPath -from .inverted_residual import InvertedResidual, InvertedResidualV3 -from .make_divisible import make_divisible -from .res_layer import ResLayer -from .se_layer import SELayer -from .self_attention_block import SelfAttentionBlock -from .up_conv_block import UpConvBlock -from .weight_init import trunc_normal_ - -__all__ = [ - 'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual', - 'UpConvBlock', 'InvertedResidualV3', 'SELayer', 'DropPath', 'trunc_normal_' -] diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/op/readme.md b/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/op/readme.md deleted file mode 100644 index 
7cffcfc72069ff9a098d292f9e37035031e19081..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/op/readme.md +++ /dev/null @@ -1,12 +0,0 @@ -Code from [rosinality-stylegan2-pytorch-cp](https://github.com/senior-sigan/rosinality-stylegan2-pytorch-cpu) - -Scripts to convert rosinality/stylegan2-pytorch to the CPU compatible format - -If you would like to use CPU for testing or have a problem regarding the cpp extention (fused and upfirdn2d), please make the following changes: - -Change `model.stylegan.op` to `model.stylegan.op_cpu` -https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/util.py#L14 - -https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/model/simple_augment.py#L12 - -https://github.com/williamyang1991/VToonify/blob/01b383efc00007f9b069585db41a7d31a77a8806/model/stylegan/model.py#L11 diff --git a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/generic_deblur.py b/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/generic_deblur.py deleted file mode 100644 index c384208de249ad7b001e9e580269ef090a81f86c..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/generic_deblur.py +++ /dev/null @@ -1,28 +0,0 @@ -import argparse - -import cv2 -import yaml -from models.deblurring.joint_deblur import JointDeblur - - -def main(): - parser = argparse.ArgumentParser(description="Kernel extractor testing") - - parser.add_argument("--image_path", action="store", help="image path", type=str, required=True) - parser.add_argument("--save_path", action="store", help="save path", type=str, default="res.png") - parser.add_argument("--yml_path", action="store", help="yml path", type=str, required=True) - - args = parser.parse_args() - - # Initializing mode - with open(args.yml_path, "rb") as f: - opt = yaml.safe_load(f) - model = JointDeblur(opt) - - blur_img = cv2.cvtColor(cv2.imread(args.image_path), cv2.COLOR_BGR2RGB) - sharp_img = model.deblur(blur_img) - - cv2.imwrite(args.save_path, sharp_img) - - -main() diff --git a/spaces/PascalLiu/FNeVR_demo/sync_batchnorm/replicate.py b/spaces/PascalLiu/FNeVR_demo/sync_batchnorm/replicate.py deleted file mode 100644 index b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06..0000000000000000000000000000000000000000 --- a/spaces/PascalLiu/FNeVR_demo/sync_batchnorm/replicate.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# File : replicate.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import functools - -from torch.nn.parallel.data_parallel import DataParallel - -__all__ = [ - 'CallbackContext', - 'execute_replication_callbacks', - 'DataParallelWithCallback', - 'patch_replication_callback' -] - - -class CallbackContext(object): - pass - - -def execute_replication_callbacks(modules): - """ - Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. - - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Note that, as all modules are isomorphism, we assign each sub-module with a context - (shared among multiple copies of this module on different devices). - Through this context, different copies can share some information. 
- - We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback - of any slave copies. - """ - master_copy = modules[0] - nr_modules = len(list(master_copy.modules())) - ctxs = [CallbackContext() for _ in range(nr_modules)] - - for i, module in enumerate(modules): - for j, m in enumerate(module.modules()): - if hasattr(m, '__data_parallel_replicate__'): - m.__data_parallel_replicate__(ctxs[j], i) - - -class DataParallelWithCallback(DataParallel): - """ - Data Parallel with a replication callback. - - An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by - original `replicate` function. - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - # sync_bn.__data_parallel_replicate__ will be invoked. - """ - - def replicate(self, module, device_ids): - modules = super(DataParallelWithCallback, self).replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - -def patch_replication_callback(data_parallel): - """ - Monkey-patch an existing `DataParallel` object. Add the replication callback. - Useful when you have customized `DataParallel` implementation. - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) - > patch_replication_callback(sync_bn) - # this is equivalent to - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - """ - - assert isinstance(data_parallel, DataParallel) - - old_replicate = data_parallel.replicate - - @functools.wraps(old_replicate) - def new_replicate(module, device_ids): - modules = old_replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - data_parallel.replicate = new_replicate diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/rnrs/mutable-strings.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/rnrs/mutable-strings.go deleted file mode 100644 index 928f634ba07385363eee71732428d339d5204d3f..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/rnrs/mutable-strings.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/AutoGPT/autogpt/promptgenerator.py b/spaces/PeepDaSlan9/AutoGPT/autogpt/promptgenerator.py deleted file mode 100644 index 0ad7046a0c41dab356abcd0151b65890e5544cd2..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/AutoGPT/autogpt/promptgenerator.py +++ /dev/null @@ -1,138 +0,0 @@ -""" A module for generating custom prompt strings.""" -from __future__ import annotations - -import json -from typing import Any - - -class PromptGenerator: - """ - A class for generating custom prompt strings based on constraints, commands, - resources, and performance evaluations. - """ - - def __init__(self) -> None: - """ - Initialize the PromptGenerator object with empty lists of constraints, - commands, resources, and performance evaluations. 
- """ - self.constraints = [] - self.commands = [] - self.resources = [] - self.performance_evaluation = [] - self.response_format = { - "thoughts": { - "text": "thought", - "reasoning": "reasoning", - "plan": "- short bulleted\n- list that conveys\n- long-term plan", - "criticism": "constructive self-criticism", - "speak": "thoughts summary to say to user", - }, - "command": {"name": "command name", "args": {"arg name": "value"}}, - } - - def add_constraint(self, constraint: str) -> None: - """ - Add a constraint to the constraints list. - - Args: - constraint (str): The constraint to be added. - """ - self.constraints.append(constraint) - - def add_command(self, command_label: str, command_name: str, args=None) -> None: - """ - Add a command to the commands list with a label, name, and optional arguments. - - Args: - command_label (str): The label of the command. - command_name (str): The name of the command. - args (dict, optional): A dictionary containing argument names and their - values. Defaults to None. - """ - if args is None: - args = {} - - command_args = {arg_key: arg_value for arg_key, arg_value in args.items()} - - command = { - "label": command_label, - "name": command_name, - "args": command_args, - } - - self.commands.append(command) - - def _generate_command_string(self, command: dict[str, Any]) -> str: - """ - Generate a formatted string representation of a command. - - Args: - command (dict): A dictionary containing command information. - - Returns: - str: The formatted command string. - """ - args_string = ", ".join( - f'"{key}": "{value}"' for key, value in command["args"].items() - ) - return f'{command["label"]}: "{command["name"]}", args: {args_string}' - - def add_resource(self, resource: str) -> None: - """ - Add a resource to the resources list. - - Args: - resource (str): The resource to be added. - """ - self.resources.append(resource) - - def add_performance_evaluation(self, evaluation: str) -> None: - """ - Add a performance evaluation item to the performance_evaluation list. - - Args: - evaluation (str): The evaluation item to be added. - """ - self.performance_evaluation.append(evaluation) - - def _generate_numbered_list(self, items: list[Any], item_type="list") -> str: - """ - Generate a numbered list from given items based on the item_type. - - Args: - items (list): A list of items to be numbered. - item_type (str, optional): The type of items in the list. - Defaults to 'list'. - - Returns: - str: The formatted numbered list. - """ - if item_type == "command": - return "\n".join( - f"{i+1}. {self._generate_command_string(item)}" - for i, item in enumerate(items) - ) - else: - return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) - - def generate_prompt_string(self) -> str: - """ - Generate a prompt string based on the constraints, commands, resources, - and performance evaluations. - - Returns: - str: The generated prompt string. 
- """ - formatted_response_format = json.dumps(self.response_format, indent=4) - return ( - f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n" - "Commands:\n" - f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" - f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n" - "Performance Evaluation:\n" - f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" - "You should only respond in JSON format as described below \nResponse" - f" Format: \n{formatted_response_format} \nEnsure the response can be" - " parsed by Python json.loads" - ) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py b/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py deleted file mode 100644 index 689513fa9d2a40f14bf0ae4ae61f38f0dcc1b3da..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py +++ /dev/null @@ -1,49 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSAHead', - in_channels=2048, - in_index=3, - channels=512, - mask_size=(97, 97), - psa_type='bi-direction', - compact=False, - shrink_factor=2, - normalization_factor=1.0, - psa_softmax=True, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/coco.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/coco.py deleted file mode 100644 index 995cc20fc5a268dd54b73f167d38a7eb536c6dbd..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/coco.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
-import os -import os.path -import math -from PIL import Image, ImageDraw - -import random -import numpy as np - -import torch -import torchvision -import torch.utils.data as data - -from maskrcnn_benchmark.structures.bounding_box import BoxList -from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask -from maskrcnn_benchmark.structures.keypoint import PersonKeypoints -from maskrcnn_benchmark.config import cfg -import pdb - -def _count_visible_keypoints(anno): - return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno) - - -def _has_only_empty_bbox(anno): - return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno) - - -def has_valid_annotation(anno): - # if it's empty, there is no annotation - if len(anno) == 0: - return False - # if all boxes have close to zero area, there is no annotation - if _has_only_empty_bbox(anno): - return False - # keypoints task have a slight different critera for considering - # if an annotation is valid - if "keypoints" not in anno[0]: - return True - # for keypoint detection tasks, only consider valid images those - # containing at least min_keypoints_per_image - if _count_visible_keypoints(anno) >= cfg.DATALOADER.MIN_KPS_PER_IMS: - return True - return False - - -def pil_loader(path, retry=5): - # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) - ri = 0 - while ri < retry: - try: - with open(path, 'rb') as f: - img = Image.open(f) - return img.convert('RGB') - except: - ri += 1 - - -def rgb2id(color): - if isinstance(color, np.ndarray) and len(color.shape) == 3: - if color.dtype == np.uint8: - color = color.astype(np.int32) - return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] - return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) - - -class CocoDetection(data.Dataset): - """`MS Coco Detection `_ Dataset. - - Args: - root (string): Root directory where images are downloaded to. - annFile (string): Path to json annotation file. - transform (callable, optional): A function/transform that takes in an PIL image - and returns a transformed version. E.g, ``transforms.ToTensor`` - target_transform (callable, optional): A function/transform that takes in the - target and transforms it. - """ - - def __init__(self, root, annFile, transform=None, target_transform=None): - from pycocotools.coco import COCO - self.root = root - self.coco = COCO(annFile) - self.ids = list(self.coco.imgs.keys()) - self.transform = transform - self.target_transform = target_transform - - def __getitem__(self, index, return_meta=False): - """ - Args: - index (int): Index - - Returns: - tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``. 
- """ - coco = self.coco - img_id = self.ids[index] - if isinstance(img_id, str): - img_id = [img_id] - ann_ids = coco.getAnnIds(imgIds=img_id) - target = coco.loadAnns(ann_ids) - - meta = coco.loadImgs(img_id)[0] - path = meta['file_name'] - img = pil_loader(os.path.join(self.root, path)) - - if self.transform is not None: - img = self.transform(img) - - if self.target_transform is not None: - target = self.target_transform(target) - - if return_meta: - return img, target, meta - else: - return img, target - - def __len__(self): - return len(self.ids) - - def __repr__(self): - fmt_str = 'Dataset ' + self.__class__.__name__ + '\n' - fmt_str += ' Number of datapoints: {}\n'.format(self.__len__()) - fmt_str += ' Root Location: {}\n'.format(self.root) - tmp = ' Transforms (if any): ' - fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))) - tmp = ' Target Transforms (if any): ' - fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))) - return fmt_str - - -class COCODataset(CocoDetection): - def __init__(self, ann_file, root, remove_images_without_annotations, transforms=None, ignore_crowd=True, - max_box=-1, - few_shot=0, one_hot=False, override_category=None, **kwargs - ): - super(COCODataset, self).__init__(root, ann_file) - # sort indices for reproducible results - self.ids = sorted(self.ids) - - # filter images without detection annotations - if remove_images_without_annotations: - ids = [] - for img_id in self.ids: - if isinstance(img_id, str): - ann_ids = self.coco.getAnnIds(imgIds=[img_id], iscrowd=None) - else: - ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None) - anno = self.coco.loadAnns(ann_ids) - if has_valid_annotation(anno): - ids.append(img_id) - self.ids = ids - - if few_shot: - ids = [] - cats_freq = [few_shot]*len(self.coco.cats.keys()) - if 'shuffle_seed' in kwargs and kwargs['shuffle_seed'] != 0: - import random - random.Random(kwargs['shuffle_seed']).shuffle(self.ids) - print("Shuffle the dataset with random seed: ", kwargs['shuffle_seed']) - for img_id in self.ids: - if isinstance(img_id, str): - ann_ids = self.coco.getAnnIds(imgIds=[img_id], iscrowd=None) - else: - ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None) - anno = self.coco.loadAnns(ann_ids) - cat = set([ann['category_id'] for ann in anno]) #set/tuple corresponde to instance/image level - is_needed = sum([cats_freq[c-1]>0 for c in cat]) - if is_needed: - ids.append(img_id) - for c in cat: - cats_freq[c-1] -= 1 - # print(cat, cats_freq) - self.ids = ids - - if override_category is not None: - self.coco.dataset["categories"] = override_category - print("Override category: ", override_category) - - self.json_category_id_to_contiguous_id = { - v: i + 1 for i, v in enumerate(self.coco.getCatIds()) - } - self.contiguous_category_id_to_json_id = { - v: k for k, v in self.json_category_id_to_contiguous_id.items() - } - self.id_to_img_map = {k: v for k, v in enumerate(self.ids)} - self.transforms = transforms - self.ignore_crowd = ignore_crowd - self.max_box = max_box - self.one_hot = one_hot - - def categories(self, no_background=True): - categories = self.coco.dataset["categories"] - label_list = {} - for index, i in enumerate(categories): - if not no_background or (i["name"] != "__background__" and i['id'] != 0): - label_list[self.json_category_id_to_contiguous_id[i["id"]]] = i["name"] - return label_list - - def __getitem__(self, idx): - - - img, anno = super(COCODataset, self).__getitem__(idx) - - # filter 
crowd annotations - if self.ignore_crowd: - anno = [obj for obj in anno if obj["iscrowd"] == 0] - - boxes = [obj["bbox"] for obj in anno] - boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes - if self.max_box > 0 and len(boxes) > self.max_box: - rand_idx = torch.randperm(self.max_box) - boxes = boxes[rand_idx, :] - else: - rand_idx = None - target = BoxList(boxes, img.size, mode="xywh").convert("xyxy") - - classes = [obj["category_id"] for obj in anno] - classes = [self.json_category_id_to_contiguous_id[c] for c in classes] - classes = torch.tensor(classes) - - if rand_idx is not None: - classes = classes[rand_idx] - if cfg.DATASETS.CLASS_AGNOSTIC: - classes = torch.ones_like(classes) - target.add_field("labels", classes) - - if anno and "segmentation" in anno[0]: - masks = [obj["segmentation"] for obj in anno] - masks = SegmentationMask(masks, img.size, mode='poly') - target.add_field("masks", masks) - - if anno and "cbox" in anno[0]: - cboxes = [obj["cbox"] for obj in anno] - cboxes = torch.as_tensor(cboxes).reshape(-1, 4) # guard against no boxes - cboxes = BoxList(cboxes, img.size, mode="xywh").convert("xyxy") - target.add_field("cbox", cboxes) - - if anno and "keypoints" in anno[0]: - keypoints = [] - gt_keypoint = self.coco.cats[1]['keypoints'] # a better way to get keypoint description - use_keypoint = cfg.MODEL.ROI_KEYPOINT_HEAD.KEYPOINT_NAME - for obj in anno: - if len(use_keypoint) > 0: - kps = [] - for name in use_keypoint: - kp_idx = slice(3 * gt_keypoint.index(name), 3 * gt_keypoint.index(name) + 3) - kps += obj["keypoints"][kp_idx] - keypoints.append(kps) - else: - keypoints.append(obj["keypoints"]) - keypoints = PersonKeypoints(keypoints, img.size) - target.add_field("keypoints", keypoints) - - target = target.clip_to_image(remove_empty=True) - - if self.transforms is not None: - img, target = self.transforms(img, target) - - if cfg.DATASETS.SAMPLE_RATIO != 0.0: - ratio = cfg.DATASETS.SAMPLE_RATIO - num_sample_target = math.ceil(len(target) * ratio) if ratio > 0 else math.ceil(-ratio) - sample_idx = torch.randperm(len(target))[:num_sample_target] - target = target[sample_idx] - return img, target, idx - - def get_img_info(self, index): - img_id = self.id_to_img_map[index] - img_data = self.coco.imgs[img_id] - return img_data diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/layers/se.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/layers/se.py deleted file mode 100644 index e8027a0de1047c2a2fdd6a60aa90ac3f5e114940..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/layers/se.py +++ /dev/null @@ -1,52 +0,0 @@ -from torch import nn - - -class SELayer(nn.Module): - def __init__(self, channel, reduction=16): - super(SELayer, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.fc = nn.Sequential( - nn.Linear(channel, channel // reduction, bias=False), - nn.ReLU(inplace=True), - nn.Linear(channel // reduction, channel, bias=False), - nn.Sigmoid() - ) - - def forward(self, x): - b, c, _, _ = x.size() - y = self.avg_pool(x).view(b, c) - y = self.fc(y).view(b, c, 1, 1) - return x * y.expand_as(x) - - -class SEBlock(nn.Module): - def __init__(self, channels, reduction=16, - use_conv=True, mid_activation=nn.ReLU(inplace=True), out_activation=nn.Sigmoid()): - super(SEBlock, self).__init__() - self.use_conv = use_conv - mid_channels = channels // reduction - - self.pool = nn.AdaptiveAvgPool2d(output_size=1) - if use_conv: - self.conv1 = 
nn.Conv2d(channels, mid_channels, kernel_size=1, bias=True) - else: - self.fc1 = nn.Linear(channels, mid_channels) - self.activ = mid_activation - if use_conv: - self.conv2 = nn.Conv2d(mid_channels, channels, kernel_size=1, bias=True) - else: - self.fc2 = nn.Linear(mid_channels, channels) - self.sigmoid = out_activation - - def forward(self, x): - w = self.pool(x) - if not self.use_conv: - w = w.view(x.size(0), -1) - w = self.conv1(w) if self.use_conv else self.fc1(w) - w = self.activ(w) - w = self.conv2(w) if self.use_conv else self.fc2(w) - w = self.sigmoid(w) - if not self.use_conv: - w = w.unsqueeze(2).unsqueeze(3) - x = x * w - return x \ No newline at end of file diff --git a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/training/utils.py b/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/training/utils.py deleted file mode 100644 index 6700b1499e183a1fbadd4a65023d589a5d26d193..0000000000000000000000000000000000000000 --- a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/training/utils.py +++ /dev/null @@ -1,356 +0,0 @@ -import torch -from torch.utils.data import DataLoader -import csv -from dateutil import parser -import numpy as np -import time -import random -import os - -class StructureDataset(): - def __init__(self, pdb_dict_list, verbose=True, truncate=None, max_length=100, - alphabet='ACDEFGHIKLMNPQRSTVWYX'): - alphabet_set = set([a for a in alphabet]) - discard_count = { - 'bad_chars': 0, - 'too_long': 0, - 'bad_seq_length': 0 - } - - self.data = [] - - start = time.time() - for i, entry in enumerate(pdb_dict_list): - seq = entry['seq'] - name = entry['name'] - - bad_chars = set([s for s in seq]).difference(alphabet_set) - if len(bad_chars) == 0: - if len(entry['seq']) <= max_length: - self.data.append(entry) - else: - discard_count['too_long'] += 1 - else: - #print(name, bad_chars, entry['seq']) - discard_count['bad_chars'] += 1 - - # Truncate early - if truncate is not None and len(self.data) == truncate: - return - - if verbose and (i + 1) % 1000 == 0: - elapsed = time.time() - start - #print('{} entries ({} loaded) in {:.1f} s'.format(len(self.data), i+1, elapsed)) - - #print('Discarded', discard_count) - def __len__(self): - return len(self.data) - - def __getitem__(self, idx): - return self.data[idx] - - -class StructureLoader(): - def __init__(self, dataset, batch_size=100, shuffle=True, - collate_fn=lambda x:x, drop_last=False): - self.dataset = dataset - self.size = len(dataset) - self.lengths = [len(dataset[i]['seq']) for i in range(self.size)] - self.batch_size = batch_size - sorted_ix = np.argsort(self.lengths) - - # Cluster into batches of similar sizes - clusters, batch = [], [] - batch_max = 0 - for ix in sorted_ix: - size = self.lengths[ix] - if size * (len(batch) + 1) <= self.batch_size: - batch.append(ix) - batch_max = size - else: - clusters.append(batch) - batch, batch_max = [], 0 - if len(batch) > 0: - clusters.append(batch) - self.clusters = clusters - - def __len__(self): - return len(self.clusters) - - def __iter__(self): - np.random.shuffle(self.clusters) - for b_idx in self.clusters: - batch = [self.dataset[i] for i in b_idx] - yield batch - - -def worker_init_fn(worker_id): - np.random.seed() - -class NoamOpt: - "Optim wrapper that implements rate." 
- def __init__(self, model_size, factor, warmup, optimizer, step): - self.optimizer = optimizer - self._step = step - self.warmup = warmup - self.factor = factor - self.model_size = model_size - self._rate = 0 - - @property - def param_groups(self): - """Return param_groups.""" - return self.optimizer.param_groups - - def step(self): - "Update parameters and rate" - self._step += 1 - rate = self.rate() - for p in self.optimizer.param_groups: - p['lr'] = rate - self._rate = rate - self.optimizer.step() - - def rate(self, step = None): - "Implement `lrate` above" - if step is None: - step = self._step - return self.factor * \ - (self.model_size ** (-0.5) * - min(step ** (-0.5), step * self.warmup ** (-1.5))) - - def zero_grad(self): - self.optimizer.zero_grad() - -def get_std_opt(parameters, d_model, step): - return NoamOpt( - d_model, 2, 4000, torch.optim.Adam(parameters, lr=0, betas=(0.9, 0.98), eps=1e-9), step - ) - - - - -def get_pdbs(data_loader, repeat=1, max_length=10000, num_units=1000000): - init_alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G','H', 'I', 'J','K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T','U', 'V','W','X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g','h', 'i', 'j','k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't','u', 'v','w','x', 'y', 'z'] - extra_alphabet = [str(item) for item in list(np.arange(300))] - chain_alphabet = init_alphabet + extra_alphabet - c = 0 - c1 = 0 - pdb_dict_list = [] - t0 = time.time() - for _ in range(repeat): - for step,t in enumerate(data_loader): - t = {k:v[0] for k,v in t.items()} - c1 += 1 - if 'label' in list(t): - my_dict = {} - s = 0 - concat_seq = '' - concat_N = [] - concat_CA = [] - concat_C = [] - concat_O = [] - concat_mask = [] - coords_dict = {} - mask_list = [] - visible_list = [] - if len(list(np.unique(t['idx']))) < 352: - for idx in list(np.unique(t['idx'])): - letter = chain_alphabet[idx] - res = np.argwhere(t['idx']==idx) - initial_sequence= "".join(list(np.array(list(t['seq']))[res][0,])) - if initial_sequence[-6:] == "HHHHHH": - res = res[:,:-6] - if initial_sequence[0:6] == "HHHHHH": - res = res[:,6:] - if initial_sequence[-7:-1] == "HHHHHH": - res = res[:,:-7] - if initial_sequence[-8:-2] == "HHHHHH": - res = res[:,:-8] - if initial_sequence[-9:-3] == "HHHHHH": - res = res[:,:-9] - if initial_sequence[-10:-4] == "HHHHHH": - res = res[:,:-10] - if initial_sequence[1:7] == "HHHHHH": - res = res[:,7:] - if initial_sequence[2:8] == "HHHHHH": - res = res[:,8:] - if initial_sequence[3:9] == "HHHHHH": - res = res[:,9:] - if initial_sequence[4:10] == "HHHHHH": - res = res[:,10:] - if res.shape[1] < 4: - pass - else: - my_dict['seq_chain_'+letter]= "".join(list(np.array(list(t['seq']))[res][0,])) - concat_seq += my_dict['seq_chain_'+letter] - if idx in t['masked']: - mask_list.append(letter) - else: - visible_list.append(letter) - coords_dict_chain = {} - all_atoms = np.array(t['xyz'][res,])[0,] #[L, 14, 3] - coords_dict_chain['N_chain_'+letter]=all_atoms[:,0,:].tolist() - coords_dict_chain['CA_chain_'+letter]=all_atoms[:,1,:].tolist() - coords_dict_chain['C_chain_'+letter]=all_atoms[:,2,:].tolist() - coords_dict_chain['O_chain_'+letter]=all_atoms[:,3,:].tolist() - my_dict['coords_chain_'+letter]=coords_dict_chain - my_dict['name']= t['label'] - my_dict['masked_list']= mask_list - my_dict['visible_list']= visible_list - my_dict['num_of_chains'] = len(mask_list) + len(visible_list) - my_dict['seq'] = concat_seq - if len(concat_seq) <= max_length: - pdb_dict_list.append(my_dict) - if len(pdb_dict_list) >= num_units: - break - return 
pdb_dict_list - - - -class PDB_dataset(torch.utils.data.Dataset): - def __init__(self, IDs, loader, train_dict, params): - self.IDs = IDs - self.train_dict = train_dict - self.loader = loader - self.params = params - - def __len__(self): - return len(self.IDs) - - def __getitem__(self, index): - ID = self.IDs[index] - sel_idx = np.random.randint(0, len(self.train_dict[ID])) - out = self.loader(self.train_dict[ID][sel_idx], self.params) - return out - - - -def loader_pdb(item,params): - - pdbid,chid = item[0].split('_') - PREFIX = "%s/pdb/%s/%s"%(params['DIR'],pdbid[1:3],pdbid) - - # load metadata - if not os.path.isfile(PREFIX+".pt"): - return {'seq': np.zeros(5)} - meta = torch.load(PREFIX+".pt") - asmb_ids = meta['asmb_ids'] - asmb_chains = meta['asmb_chains'] - chids = np.array(meta['chains']) - - # find candidate assemblies which contain chid chain - asmb_candidates = set([a for a,b in zip(asmb_ids,asmb_chains) - if chid in b.split(',')]) - - # if the chains is missing is missing from all the assemblies - # then return this chain alone - if len(asmb_candidates)<1: - chain = torch.load("%s_%s.pt"%(PREFIX,chid)) - L = len(chain['seq']) - return {'seq' : chain['seq'], - 'xyz' : chain['xyz'], - 'idx' : torch.zeros(L).int(), - 'masked' : torch.Tensor([0]).int(), - 'label' : item[0]} - - # randomly pick one assembly from candidates - asmb_i = random.sample(list(asmb_candidates), 1) - - # indices of selected transforms - idx = np.where(np.array(asmb_ids)==asmb_i)[0] - - # load relevant chains - chains = {c:torch.load("%s_%s.pt"%(PREFIX,c)) - for i in idx for c in asmb_chains[i] - if c in meta['chains']} - - # generate assembly - asmb = {} - for k in idx: - - # pick k-th xform - xform = meta['asmb_xform%d'%k] - u = xform[:,:3,:3] - r = xform[:,:3,3] - - # select chains which k-th xform should be applied to - s1 = set(meta['chains']) - s2 = set(asmb_chains[k].split(',')) - chains_k = s1&s2 - - # transform selected chains - for c in chains_k: - try: - xyz = chains[c]['xyz'] - xyz_ru = torch.einsum('bij,raj->brai', u, xyz) + r[:,None,None,:] - asmb.update({(c,k,i):xyz_i for i,xyz_i in enumerate(xyz_ru)}) - except KeyError: - return {'seq': np.zeros(5)} - - # select chains which share considerable similarity to chid - seqid = meta['tm'][chids==chid][0,:,1] - homo = set([ch_j for seqid_j,ch_j in zip(seqid,chids) - if seqid_j>params['HOMO']]) - # stack all chains in the assembly together - seq,xyz,idx,masked = "",[],[],[] - seq_list = [] - for counter,(k,v) in enumerate(asmb.items()): - seq += chains[k[0]]['seq'] - seq_list.append(chains[k[0]]['seq']) - xyz.append(v) - idx.append(torch.full((v.shape[0],),counter)) - if k[0] in homo: - masked.append(counter) - - return {'seq' : seq, - 'xyz' : torch.cat(xyz,dim=0), - 'idx' : torch.cat(idx,dim=0), - 'masked' : torch.Tensor(masked).int(), - 'label' : item[0]} - - - - -def build_training_clusters(params, debug): - val_ids = set([int(l) for l in open(params['VAL']).readlines()]) - test_ids = set([int(l) for l in open(params['TEST']).readlines()]) - - if debug: - val_ids = [] - test_ids = [] - - # read & clean list.csv - with open(params['LIST'], 'r') as f: - reader = csv.reader(f) - next(reader) - rows = [[r[0],r[3],int(r[4])] for r in reader - if float(r[2])<=params['RESCUT'] and - parser.parse(r[1])<=parser.parse(params['DATCUT'])] - - # compile training and validation sets - train = {} - valid = {} - test = {} - - if debug: - rows = rows[:20] - for r in rows: - if r[2] in val_ids: - if r[2] in valid.keys(): - valid[r[2]].append(r[:2]) - else: - 
valid[r[2]] = [r[:2]] - elif r[2] in test_ids: - if r[2] in test.keys(): - test[r[2]].append(r[:2]) - else: - test[r[2]] = [r[:2]] - else: - if r[2] in train.keys(): - train[r[2]].append(r[:2]) - else: - train[r[2]] = [r[:2]] - if debug: - valid=train - return train, valid, test diff --git a/spaces/RamAnanth1/stable-diffusion-xl/README.md b/spaces/RamAnanth1/stable-diffusion-xl/README.md deleted file mode 100644 index b22138889468a3ddbb2b1ed4b94cfa8568bf8060..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/stable-diffusion-xl/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stable Diffusion XL -emoji: 🔥 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.11.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ramse/TTS_Hindi/README.md b/spaces/Ramse/TTS_Hindi/README.md deleted file mode 100644 index 33594c78beb03a472379f25a2e55d36b8af7bfcb..0000000000000000000000000000000000000000 --- a/spaces/Ramse/TTS_Hindi/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: TTS Hindi -emoji: 🦀 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/metadata/pkg_resources.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/metadata/pkg_resources.py deleted file mode 100644 index f330ef12a2c5ea0a4adbecbeea389741479d5eb4..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/metadata/pkg_resources.py +++ /dev/null @@ -1,270 +0,0 @@ -import email.message -import email.parser -import logging -import os -import zipfile -from typing import Collection, Iterable, Iterator, List, Mapping, NamedTuple, Optional - -from pip._vendor import pkg_resources -from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.utils import NormalizedName, canonicalize_name -from pip._vendor.packaging.version import parse as parse_version - -from pip._internal.exceptions import InvalidWheel, NoneMetadataError, UnsupportedWheel -from pip._internal.utils.egg_link import egg_link_path_from_location -from pip._internal.utils.misc import display_path, normalize_path -from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file - -from .base import ( - BaseDistribution, - BaseEntryPoint, - BaseEnvironment, - DistributionVersion, - InfoPath, - Wheel, -) - -logger = logging.getLogger(__name__) - - -class EntryPoint(NamedTuple): - name: str - value: str - group: str - - -class InMemoryMetadata: - """IMetadataProvider that reads metadata files from a dictionary. - - This also maps metadata decoding exceptions to our internal exception type. - """ - - def __init__(self, metadata: Mapping[str, bytes], wheel_name: str) -> None: - self._metadata = metadata - self._wheel_name = wheel_name - - def has_metadata(self, name: str) -> bool: - return name in self._metadata - - def get_metadata(self, name: str) -> str: - try: - return self._metadata[name].decode() - except UnicodeDecodeError as e: - # Augment the default error with the origin of the file. 
- raise UnsupportedWheel( - f"Error decoding metadata for {self._wheel_name}: {e} in {name} file" - ) - - def get_metadata_lines(self, name: str) -> Iterable[str]: - return pkg_resources.yield_lines(self.get_metadata(name)) - - def metadata_isdir(self, name: str) -> bool: - return False - - def metadata_listdir(self, name: str) -> List[str]: - return [] - - def run_script(self, script_name: str, namespace: str) -> None: - pass - - -class Distribution(BaseDistribution): - def __init__(self, dist: pkg_resources.Distribution) -> None: - self._dist = dist - - @classmethod - def from_directory(cls, directory: str) -> BaseDistribution: - dist_dir = directory.rstrip(os.sep) - - # Build a PathMetadata object, from path to metadata. :wink: - base_dir, dist_dir_name = os.path.split(dist_dir) - metadata = pkg_resources.PathMetadata(base_dir, dist_dir) - - # Determine the correct Distribution object type. - if dist_dir.endswith(".egg-info"): - dist_cls = pkg_resources.Distribution - dist_name = os.path.splitext(dist_dir_name)[0] - else: - assert dist_dir.endswith(".dist-info") - dist_cls = pkg_resources.DistInfoDistribution - dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0] - - dist = dist_cls(base_dir, project_name=dist_name, metadata=metadata) - return cls(dist) - - @classmethod - def from_metadata_file_contents( - cls, - metadata_contents: bytes, - filename: str, - project_name: str, - ) -> BaseDistribution: - metadata_dict = { - "METADATA": metadata_contents, - } - dist = pkg_resources.DistInfoDistribution( - location=filename, - metadata=InMemoryMetadata(metadata_dict, filename), - project_name=project_name, - ) - return cls(dist) - - @classmethod - def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution: - try: - with wheel.as_zipfile() as zf: - info_dir, _ = parse_wheel(zf, name) - metadata_dict = { - path.split("/", 1)[-1]: read_wheel_metadata_file(zf, path) - for path in zf.namelist() - if path.startswith(f"{info_dir}/") - } - except zipfile.BadZipFile as e: - raise InvalidWheel(wheel.location, name) from e - except UnsupportedWheel as e: - raise UnsupportedWheel(f"{name} has an invalid wheel, {e}") - dist = pkg_resources.DistInfoDistribution( - location=wheel.location, - metadata=InMemoryMetadata(metadata_dict, wheel.location), - project_name=name, - ) - return cls(dist) - - @property - def location(self) -> Optional[str]: - return self._dist.location - - @property - def installed_location(self) -> Optional[str]: - egg_link = egg_link_path_from_location(self.raw_name) - if egg_link: - location = egg_link - elif self.location: - location = self.location - else: - return None - return normalize_path(location) - - @property - def info_location(self) -> Optional[str]: - return self._dist.egg_info - - @property - def installed_by_distutils(self) -> bool: - # A distutils-installed distribution is provided by FileMetadata. This - # provider has a "path" attribute not present anywhere else. Not the - # best introspection logic, but pip has been doing this for a long time. 
- try: - return bool(self._dist._provider.path) - except AttributeError: - return False - - @property - def canonical_name(self) -> NormalizedName: - return canonicalize_name(self._dist.project_name) - - @property - def version(self) -> DistributionVersion: - return parse_version(self._dist.version) - - def is_file(self, path: InfoPath) -> bool: - return self._dist.has_metadata(str(path)) - - def iter_distutils_script_names(self) -> Iterator[str]: - yield from self._dist.metadata_listdir("scripts") - - def read_text(self, path: InfoPath) -> str: - name = str(path) - if not self._dist.has_metadata(name): - raise FileNotFoundError(name) - content = self._dist.get_metadata(name) - if content is None: - raise NoneMetadataError(self, name) - return content - - def iter_entry_points(self) -> Iterable[BaseEntryPoint]: - for group, entries in self._dist.get_entry_map().items(): - for name, entry_point in entries.items(): - name, _, value = str(entry_point).partition("=") - yield EntryPoint(name=name.strip(), value=value.strip(), group=group) - - def _metadata_impl(self) -> email.message.Message: - """ - :raises NoneMetadataError: if the distribution reports `has_metadata()` - True but `get_metadata()` returns None. - """ - if isinstance(self._dist, pkg_resources.DistInfoDistribution): - metadata_name = "METADATA" - else: - metadata_name = "PKG-INFO" - try: - metadata = self.read_text(metadata_name) - except FileNotFoundError: - if self.location: - displaying_path = display_path(self.location) - else: - displaying_path = repr(self.location) - logger.warning("No metadata found in %s", displaying_path) - metadata = "" - feed_parser = email.parser.FeedParser() - feed_parser.feed(metadata) - return feed_parser.close() - - def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: - if extras: # pkg_resources raises on invalid extras, so we sanitize. - extras = frozenset(extras).intersection(self._dist.extras) - return self._dist.requires(extras) - - def iter_provided_extras(self) -> Iterable[str]: - return self._dist.extras - - -class Environment(BaseEnvironment): - def __init__(self, ws: pkg_resources.WorkingSet) -> None: - self._ws = ws - - @classmethod - def default(cls) -> BaseEnvironment: - return cls(pkg_resources.working_set) - - @classmethod - def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment: - return cls(pkg_resources.WorkingSet(paths)) - - def _iter_distributions(self) -> Iterator[BaseDistribution]: - for dist in self._ws: - yield Distribution(dist) - - def _search_distribution(self, name: str) -> Optional[BaseDistribution]: - """Find a distribution matching the ``name`` in the environment. - - This searches from *all* distributions available in the environment, to - match the behavior of ``pkg_resources.get_distribution()``. - """ - canonical_name = canonicalize_name(name) - for dist in self.iter_all_distributions(): - if dist.canonical_name == canonical_name: - return dist - return None - - def get_distribution(self, name: str) -> Optional[BaseDistribution]: - # Search the distribution by looking through the working set. - dist = self._search_distribution(name) - if dist: - return dist - - # If distribution could not be found, call working_set.require to - # update the working set, and try to find the distribution again. - # This might happen for e.g. when you install a package twice, once - # using setup.py develop and again using setup.py install. 
Now when - # running pip uninstall twice, the package gets removed from the - # working set in the first uninstall, so we have to populate the - # working set again so that pip knows about it and the packages gets - # picked up and is successfully uninstalled the second time too. - try: - # We didn't pass in any version specifiers, so this can never - # raise pkg_resources.VersionConflict. - self._ws.require(name) - except pkg_resources.DistributionNotFound: - return None - return self._search_distribution(name) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/utils/distutils_args.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/utils/distutils_args.py deleted file mode 100644 index 2fd1862073f55d5551fc2c1bc1e9eaaed0c0e877..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/utils/distutils_args.py +++ /dev/null @@ -1,43 +0,0 @@ -from getopt import GetoptError, getopt -from typing import Dict, List - -_options = [ - "exec-prefix=", - "home=", - "install-base=", - "install-data=", - "install-headers=", - "install-lib=", - "install-platlib=", - "install-purelib=", - "install-scripts=", - "prefix=", - "root=", - "user", -] - - -def parse_distutils_args(args: List[str]) -> Dict[str, str]: - """Parse provided arguments, returning an object that has the matched arguments. - - Any unknown arguments are ignored. - """ - result = {} - for arg in args: - try: - parsed_opt, _ = getopt(args=[arg], shortopts="", longopts=_options) - except GetoptError: - # We don't care about any other options, which here may be - # considered unrecognized since our option list is not - # exhaustive. - continue - - if not parsed_opt: - continue - - option = parsed_opt[0] - name_from_parsed = option[0][2:].replace("-", "_") - value_from_parsed = option[1] or "true" - result[name_from_parsed] = value_from_parsed - - return result diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/chardet/cp949prober.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/chardet/cp949prober.py deleted file mode 100644 index 28a1f3dbb5785e8845e22db690b538f5159bb7f3..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/chardet/cp949prober.py +++ /dev/null @@ -1,49 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .chardistribution import EUCKRDistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import CP949_SM_MODEL - - -class CP949Prober(MultiByteCharSetProber): - def __init__(self): - super().__init__() - self.coding_sm = CodingStateMachine(CP949_SM_MODEL) - # NOTE: CP949 is a superset of EUC-KR, so the distribution should be - # not different. - self.distribution_analyzer = EUCKRDistributionAnalysis() - self.reset() - - @property - def charset_name(self): - return "CP949" - - @property - def language(self): - return "Korean" diff --git a/spaces/Realcat/image-matching-webui/hloc/pipelines/4Seasons/utils.py b/spaces/Realcat/image-matching-webui/hloc/pipelines/4Seasons/utils.py deleted file mode 100644 index 8def70f225bff6a2e0f0433f6dbafbdfc031ef45..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/hloc/pipelines/4Seasons/utils.py +++ /dev/null @@ -1,234 +0,0 @@ -import os -import numpy as np -import logging -from pathlib import Path - -from ...utils.read_write_model import qvec2rotmat, rotmat2qvec -from ...utils.read_write_model import Image, write_model, Camera -from ...utils.parsers import parse_retrieval - -logger = logging.getLogger(__name__) - - -def get_timestamps(files, idx): - """Extract timestamps from a pose or relocalization file.""" - lines = [] - for p in files.parent.glob(files.name): - with open(p) as f: - lines += f.readlines() - timestamps = set() - for line in lines: - line = line.rstrip("\n") - if line[0] == "#" or line == "": - continue - ts = line.replace(",", " ").split()[idx] - timestamps.add(ts) - return timestamps - - -def delete_unused_images(root, timestamps): - """Delete all images in root if they are not contained in timestamps.""" - images = list(root.glob("**/*.png")) - deleted = 0 - for image in images: - ts = image.stem - if ts not in timestamps: - os.remove(image) - deleted += 1 - logger.info(f"Deleted {deleted} images in {root}.") - - -def camera_from_calibration_file(id_, path): - """Create a COLMAP camera from an MLAD calibration file.""" - with open(path, "r") as f: - data = f.readlines() - model, fx, fy, cx, cy = data[0].split()[:5] - width, height = data[1].split() - assert model == "Pinhole" - model_name = "PINHOLE" - params = [float(i) for i in [fx, fy, cx, cy]] - camera = Camera( - id=id_, - model=model_name, - width=int(width), - height=int(height), - params=params, - ) - return camera - - -def parse_poses(path, colmap=False): - """Parse a list of poses in COLMAP or MLAD quaternion convention.""" - poses = [] - with open(path) as f: - for line in f.readlines(): - line = line.rstrip("\n") - if line[0] == "#" or line == "": - continue - data = line.replace(",", " ").split() - ts, p = data[0], np.array(data[1:], float) - if colmap: - q, t = np.split(p, [4]) - else: - t, q = np.split(p, [3]) - q = q[[3, 0, 1, 2]] # xyzw to wxyz - R = qvec2rotmat(q) - poses.append((ts, R, t)) - return poses - - -def parse_relocalization(path, has_poses=False): - """Parse a relocalization file, possibly with poses.""" - reloc = [] - with open(path) as f: - for line in f.readlines(): - line = line.rstrip("\n") - if line[0] == "#" or line == "": - continue - data = 
line.replace(",", " ").split() - out = data[:2] # ref_ts, q_ts - if has_poses: - assert len(data) == 9 - t, q = np.split(np.array(data[2:], float), [3]) - q = q[[3, 0, 1, 2]] # xyzw to wxyz - R = qvec2rotmat(q) - out += [R, t] - reloc.append(out) - return reloc - - -def build_empty_colmap_model(root, sfm_dir): - """Build a COLMAP model with images and cameras only.""" - calibration = "Calibration/undistorted_calib_{}.txt" - cam0 = camera_from_calibration_file(0, root / calibration.format(0)) - cam1 = camera_from_calibration_file(1, root / calibration.format(1)) - cameras = {0: cam0, 1: cam1} - - T_0to1 = np.loadtxt(root / "Calibration/undistorted_calib_stereo.txt") - poses = parse_poses(root / "poses.txt") - images = {} - id_ = 0 - for ts, R_cam0_to_w, t_cam0_to_w in poses: - R_w_to_cam0 = R_cam0_to_w.T - t_w_to_cam0 = -(R_w_to_cam0 @ t_cam0_to_w) - - R_w_to_cam1 = T_0to1[:3, :3] @ R_w_to_cam0 - t_w_to_cam1 = T_0to1[:3, :3] @ t_w_to_cam0 + T_0to1[:3, 3] - - for idx, (R_w_to_cam, t_w_to_cam) in enumerate( - zip([R_w_to_cam0, R_w_to_cam1], [t_w_to_cam0, t_w_to_cam1]) - ): - image = Image( - id=id_, - qvec=rotmat2qvec(R_w_to_cam), - tvec=t_w_to_cam, - camera_id=idx, - name=f"cam{idx}/{ts}.png", - xys=np.zeros((0, 2), float), - point3D_ids=np.full(0, -1, int), - ) - images[id_] = image - id_ += 1 - - sfm_dir.mkdir(exist_ok=True, parents=True) - write_model(cameras, images, {}, path=str(sfm_dir), ext=".bin") - - -def generate_query_lists(timestamps, seq_dir, out_path): - """Create a list of query images with intrinsics from timestamps.""" - cam0 = camera_from_calibration_file( - 0, seq_dir / "Calibration/undistorted_calib_0.txt" - ) - intrinsics = [cam0.model, cam0.width, cam0.height] + cam0.params - intrinsics = [str(p) for p in intrinsics] - data = map(lambda ts: " ".join([f"cam0/{ts}.png"] + intrinsics), timestamps) - with open(out_path, "w") as f: - f.write("\n".join(data)) - - -def generate_localization_pairs(sequence, reloc, num, ref_pairs, out_path): - """Create the matching pairs for the localization. - We simply lookup the corresponding reference frame - and extract its `num` closest frames from the existing pair list. 
- """ - if "test" in sequence: - # hard pairs will be overwritten by easy ones if available - relocs = [ - str(reloc).replace("*", d) for d in ["hard", "moderate", "easy"] - ] - else: - relocs = [reloc] - query_to_ref_ts = {} - for reloc in relocs: - with open(reloc, "r") as f: - for line in f.readlines(): - line = line.rstrip("\n") - if line[0] == "#" or line == "": - continue - ref_ts, q_ts = line.split()[:2] - query_to_ref_ts[q_ts] = ref_ts - - ts_to_name = "cam0/{}.png".format - ref_pairs = parse_retrieval(ref_pairs) - loc_pairs = [] - for q_ts, ref_ts in query_to_ref_ts.items(): - ref_name = ts_to_name(ref_ts) - selected = [ref_name] + ref_pairs[ref_name][: num - 1] - loc_pairs.extend([" ".join((ts_to_name(q_ts), s)) for s in selected]) - with open(out_path, "w") as f: - f.write("\n".join(loc_pairs)) - - -def prepare_submission(results, relocs, poses_path, out_dir): - """Obtain relative poses from estimated absolute and reference poses.""" - gt_poses = parse_poses(poses_path) - all_T_ref0_to_w = {ts: (R, t) for ts, R, t in gt_poses} - - pred_poses = parse_poses(results, colmap=True) - all_T_w_to_q0 = {Path(name).stem: (R, t) for name, R, t in pred_poses} - - for reloc in relocs.parent.glob(relocs.name): - relative_poses = [] - reloc_ts = parse_relocalization(reloc) - for ref_ts, q_ts in reloc_ts: - R_w_to_q0, t_w_to_q0 = all_T_w_to_q0[q_ts] - R_ref0_to_w, t_ref0_to_w = all_T_ref0_to_w[ref_ts] - - R_ref0_to_q0 = R_w_to_q0 @ R_ref0_to_w - t_ref0_to_q0 = R_w_to_q0 @ t_ref0_to_w + t_w_to_q0 - - tvec = t_ref0_to_q0.tolist() - qvec = rotmat2qvec(R_ref0_to_q0)[[1, 2, 3, 0]] # wxyz to xyzw - - out = [ref_ts, q_ts] + list(map(str, tvec)) + list(map(str, qvec)) - relative_poses.append(" ".join(out)) - - out_path = out_dir / reloc.name - with open(out_path, "w") as f: - f.write("\n".join(relative_poses)) - logger.info(f"Submission file written to {out_path}.") - - -def evaluate_submission(submission_dir, relocs, ths=[0.1, 0.2, 0.5]): - """Compute the relocalization recall from predicted and ground truth poses.""" - for reloc in relocs.parent.glob(relocs.name): - poses_gt = parse_relocalization(reloc, has_poses=True) - poses_pred = parse_relocalization( - submission_dir / reloc.name, has_poses=True - ) - poses_pred = { - (ref_ts, q_ts): (R, t) for ref_ts, q_ts, R, t in poses_pred - } - - error = [] - for ref_ts, q_ts, R_gt, t_gt in poses_gt: - R, t = poses_pred[(ref_ts, q_ts)] - e = np.linalg.norm(t - t_gt) - error.append(e) - - error = np.array(error) - recall = [np.mean(error <= th) for th in ths] - s = f"Relocalization evaluation {submission_dir.name}/{reloc.name}\n" - s += " / ".join([f"{th:>7}m" for th in ths]) + "\n" - s += " / ".join([f"{100*r:>7.3f}%" for r in recall]) - logger.info(s) diff --git a/spaces/Redgon/bingo/src/components/toaster.tsx b/spaces/Redgon/bingo/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/Redgon/bingo/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast' diff --git a/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn_pytorch/src/detector.py b/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn_pytorch/src/detector.py deleted file mode 100644 index b162cff3194cc0114abd1a840e5dc772a55edd25..0000000000000000000000000000000000000000 --- a/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn_pytorch/src/detector.py +++ /dev/null @@ -1,126 +0,0 @@ -import numpy as np -import torch -from torch.autograd import Variable -from 
.get_nets import PNet, RNet, ONet -from .box_utils import nms, calibrate_box, get_image_boxes, convert_to_square -from .first_stage import run_first_stage - - -def detect_faces(image, min_face_size=20.0, - thresholds=[0.6, 0.7, 0.8], - nms_thresholds=[0.7, 0.7, 0.7]): - """ - Arguments: - image: an instance of PIL.Image. - min_face_size: a float number. - thresholds: a list of length 3. - nms_thresholds: a list of length 3. - - Returns: - two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10], - bounding boxes and facial landmarks. - """ - - # LOAD MODELS - pnet = PNet() - rnet = RNet() - onet = ONet() - onet.eval() - - # BUILD AN IMAGE PYRAMID - width, height = image.size - min_length = min(height, width) - - min_detection_size = 12 - factor = 0.707 # sqrt(0.5) - - # scales for scaling the image - scales = [] - - # scales the image so that - # minimum size that we can detect equals to - # minimum face size that we want to detect - m = min_detection_size / min_face_size - min_length *= m - - factor_count = 0 - while min_length > min_detection_size: - scales.append(m * factor ** factor_count) - min_length *= factor - factor_count += 1 - - # STAGE 1 - - # it will be returned - bounding_boxes = [] - - with torch.no_grad(): - # run P-Net on different scales - for s in scales: - boxes = run_first_stage(image, pnet, scale=s, threshold=thresholds[0]) - bounding_boxes.append(boxes) - - # collect boxes (and offsets, and scores) from different scales - bounding_boxes = [i for i in bounding_boxes if i is not None] - bounding_boxes = np.vstack(bounding_boxes) - - keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0]) - bounding_boxes = bounding_boxes[keep] - - # use offsets predicted by pnet to transform bounding boxes - bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:]) - # shape [n_boxes, 5] - - bounding_boxes = convert_to_square(bounding_boxes) - bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) - - # STAGE 2 - - img_boxes = get_image_boxes(bounding_boxes, image, size=24) - img_boxes = torch.FloatTensor(img_boxes) - - output = rnet(img_boxes) - offsets = output[0].data.numpy() # shape [n_boxes, 4] - probs = output[1].data.numpy() # shape [n_boxes, 2] - - keep = np.where(probs[:, 1] > thresholds[1])[0] - bounding_boxes = bounding_boxes[keep] - bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) - offsets = offsets[keep] - - keep = nms(bounding_boxes, nms_thresholds[1]) - bounding_boxes = bounding_boxes[keep] - bounding_boxes = calibrate_box(bounding_boxes, offsets[keep]) - bounding_boxes = convert_to_square(bounding_boxes) - bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) - - # STAGE 3 - - img_boxes = get_image_boxes(bounding_boxes, image, size=48) - if len(img_boxes) == 0: - return [], [] - img_boxes = torch.FloatTensor(img_boxes) - output = onet(img_boxes) - landmarks = output[0].data.numpy() # shape [n_boxes, 10] - offsets = output[1].data.numpy() # shape [n_boxes, 4] - probs = output[2].data.numpy() # shape [n_boxes, 2] - - keep = np.where(probs[:, 1] > thresholds[2])[0] - bounding_boxes = bounding_boxes[keep] - bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) - offsets = offsets[keep] - landmarks = landmarks[keep] - - # compute landmark points - width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0 - height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0 - xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1] - landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1) * landmarks[:, 0:5] - landmarks[:, 5:10] = 
np.expand_dims(ymin, 1) + np.expand_dims(height, 1) * landmarks[:, 5:10] - - bounding_boxes = calibrate_box(bounding_boxes, offsets) - keep = nms(bounding_boxes, nms_thresholds[2], mode='min') - bounding_boxes = bounding_boxes[keep] - landmarks = landmarks[keep] - - return bounding_boxes, landmarks diff --git a/spaces/Robert001/UniControl-Demo/annotator/midas/midas/midas_net.py b/spaces/Robert001/UniControl-Demo/annotator/midas/midas/midas_net.py deleted file mode 100644 index 356e7538f5b9691babe061342fbf8f092360999f..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/midas/midas/midas_net.py +++ /dev/null @@ -1,86 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala -''' - -"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. -This file contains code that is adapted from -https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py -""" -import torch -import torch.nn as nn - -from .base_model import BaseModel -from .blocks import FeatureFusionBlock, Interpolate, _make_encoder - - -class MidasNet(BaseModel): - """Network for monocular depth estimation. - """ - - def __init__(self, path=None, features=256, non_negative=True): - """Init. - - Args: - path (str, optional): Path to saved model. Defaults to None. - features (int, optional): Number of features. Defaults to 256. - backbone (str, optional): Backbone network for encoder. Defaults to resnet50 - """ - print("Loading weights: ", path) - - super(MidasNet, self).__init__() - - use_pretrained = False if path is None else True - - self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained) - - self.scratch.refinenet4 = FeatureFusionBlock(features) - self.scratch.refinenet3 = FeatureFusionBlock(features) - self.scratch.refinenet2 = FeatureFusionBlock(features) - self.scratch.refinenet1 = FeatureFusionBlock(features) - - self.scratch.output_conv = nn.Sequential( - nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), - Interpolate(scale_factor=2, mode="bilinear"), - nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1), - nn.ReLU(True), - nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - nn.ReLU(True) if non_negative else nn.Identity(), - ) - - if path: - self.load(path) - - def forward(self, x): - """Forward pass. 
- - Args: - x (tensor): input data (image) - - Returns: - tensor: depth - """ - - layer_1 = self.pretrained.layer1(x) - layer_2 = self.pretrained.layer2(layer_1) - layer_3 = self.pretrained.layer3(layer_2) - layer_4 = self.pretrained.layer4(layer_3) - - layer_1_rn = self.scratch.layer1_rn(layer_1) - layer_2_rn = self.scratch.layer2_rn(layer_2) - layer_3_rn = self.scratch.layer3_rn(layer_3) - layer_4_rn = self.scratch.layer4_rn(layer_4) - - path_4 = self.scratch.refinenet4(layer_4_rn) - path_3 = self.scratch.refinenet3(path_4, layer_3_rn) - path_2 = self.scratch.refinenet2(path_3, layer_2_rn) - path_1 = self.scratch.refinenet1(path_2, layer_1_rn) - - out = self.scratch.output_conv(path_1) - - return torch.squeeze(out, dim=1) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/datasets/samplers/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/datasets/samplers/__init__.py deleted file mode 100644 index 2596aeb2ccfc85b58624713c04453d34e94a4062..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/datasets/samplers/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .distributed_sampler import DistributedSampler -from .group_sampler import DistributedGroupSampler, GroupSampler - -__all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler'] diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/utils/gaussian_target.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/utils/gaussian_target.py deleted file mode 100644 index 7bb7160cb4bf2f47876f6e8373142aa5846920a9..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/utils/gaussian_target.py +++ /dev/null @@ -1,185 +0,0 @@ -from math import sqrt - -import torch - - -def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'): - """Generate 2D gaussian kernel. - - Args: - radius (int): Radius of gaussian kernel. - sigma (int): Sigma of gaussian function. Default: 1. - dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32. - device (str): Device of gaussian tensor. Default: 'cpu'. - - Returns: - h (Tensor): Gaussian kernel with a - ``(2 * radius + 1) * (2 * radius + 1)`` shape. - """ - x = torch.arange( - -radius, radius + 1, dtype=dtype, device=device).view(1, -1) - y = torch.arange( - -radius, radius + 1, dtype=dtype, device=device).view(-1, 1) - - h = (-(x * x + y * y) / (2 * sigma * sigma)).exp() - - h[h < torch.finfo(h.dtype).eps * h.max()] = 0 - return h - - -def gen_gaussian_target(heatmap, center, radius, k=1): - """Generate 2D gaussian heatmap. - - Args: - heatmap (Tensor): Input heatmap, the gaussian kernel will cover on - it and maintain the max value. - center (list[int]): Coord of gaussian kernel's center. - radius (int): Radius of gaussian kernel. - k (int): Coefficient of gaussian kernel. Default: 1. - - Returns: - out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. 
- """ - diameter = 2 * radius + 1 - gaussian_kernel = gaussian2D( - radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device) - - x, y = center - - height, width = heatmap.shape[:2] - - left, right = min(x, radius), min(width - x, radius + 1) - top, bottom = min(y, radius), min(height - y, radius + 1) - - masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] - masked_gaussian = gaussian_kernel[radius - top:radius + bottom, - radius - left:radius + right] - out_heatmap = heatmap - torch.max( - masked_heatmap, - masked_gaussian * k, - out=out_heatmap[y - top:y + bottom, x - left:x + right]) - - return out_heatmap - - -def gaussian_radius(det_size, min_overlap): - r"""Generate 2D gaussian radius. - - This function is modified from the `official github repo - `_. - - Given ``min_overlap``, radius could computed by a quadratic equation - according to Vieta's formulas. - - There are 3 cases for computing gaussian radius, details are following: - - - Explanation of figure: ``lt`` and ``br`` indicates the left-top and - bottom-right corner of ground truth box. ``x`` indicates the - generated corner at the limited position when ``radius=r``. - - - Case1: one corner is inside the gt box and the other is outside. - - .. code:: text - - |< width >| - - lt-+----------+ - - | | | ^ - +--x----------+--+ - | | | | - | | | | height - | | overlap | | - | | | | - | | | | v - +--+---------br--+ - - | | | - +----------+--x - - To ensure IoU of generated box and gt box is larger than ``min_overlap``: - - .. math:: - \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad - {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ - {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} - {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - - - Case2: both two corners are inside the gt box. - - .. code:: text - - |< width >| - - lt-+----------+ - - | | | ^ - +--x-------+ | - | | | | - | |overlap| | height - | | | | - | +-------x--+ - | | | v - +----------+-br - - - To ensure IoU of generated box and gt box is larger than ``min_overlap``: - - .. math:: - \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad - {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ - {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} - {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - - - Case3: both two corners are outside the gt box. - - .. code:: text - - |< width >| - - x--+----------------+ - | | | - +-lt-------------+ | - - | | | | ^ - | | | | - | | overlap | | height - | | | | - | | | | v - | +------------br--+ - - | | | - +----------------+--x - - To ensure IoU of generated box and gt box is larger than ``min_overlap``: - - .. math:: - \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad - {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\ - {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\ - {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a} - - Args: - det_size (list[int]): Shape of object. - min_overlap (float): Min IoU with ground truth for boxes generated by - keypoints inside the gaussian kernel. - - Returns: - radius (int): Radius of gaussian kernel. 
- """ - height, width = det_size - - a1 = 1 - b1 = (height + width) - c1 = width * height * (1 - min_overlap) / (1 + min_overlap) - sq1 = sqrt(b1**2 - 4 * a1 * c1) - r1 = (b1 - sq1) / (2 * a1) - - a2 = 4 - b2 = 2 * (height + width) - c2 = (1 - min_overlap) * width * height - sq2 = sqrt(b2**2 - 4 * a2 * c2) - r2 = (b2 - sq2) / (2 * a2) - - a3 = 4 * min_overlap - b3 = -2 * min_overlap * (height + width) - c3 = (min_overlap - 1) * width * height - sq3 = sqrt(b3**2 - 4 * a3 * c3) - r3 = (b3 + sq3) / (2 * a3) - return min(r1, r2, r3) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/datasets/chase_db1.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/datasets/chase_db1.py deleted file mode 100644 index 8bc29bea14704a4407f83474610cbc3bef32c708..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/datasets/chase_db1.py +++ /dev/null @@ -1,27 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class ChaseDB1Dataset(CustomDataset): - """Chase_db1 dataset. - - In segmentation map annotation for Chase_db1, 0 stands for background, - which is included in 2 categories. ``reduce_zero_label`` is fixed to False. - The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to - '_1stHO.png'. - """ - - CLASSES = ('background', 'vessel') - - PALETTE = [[120, 120, 120], [6, 230, 230]] - - def __init__(self, **kwargs): - super(ChaseDB1Dataset, self).__init__( - img_suffix='.png', - seg_map_suffix='_1stHO.png', - reduce_zero_label=False, - **kwargs) - assert osp.exists(self.img_dir) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/utils/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/utils/__init__.py deleted file mode 100644 index ac489e2dbbc0e6fa87f5088b4edcc20f8cadc1a6..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/utils/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .collect_env import collect_env -from .logger import get_root_logger - -__all__ = ['get_root_logger', 'collect_env'] diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmseg/datasets/cityscapes.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmseg/datasets/cityscapes.py deleted file mode 100644 index 7961a97411567a2161d37be665e1e0da6bced4d1..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmseg/datasets/cityscapes.py +++ /dev/null @@ -1,229 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv - * Copyright (c) OpenMMLab. All rights reserved. -''' - -import os.path as osp -import tempfile - -import annotator.uniformer.mmcv as mmcv -import numpy as np -from annotator.uniformer.mmcv.utils import print_log -from PIL import Image - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class CityscapesDataset(CustomDataset): - """Cityscapes dataset. 
- - The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is - fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset. - """ - - CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', - 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', - 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle') - - PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], - [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], - [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], - [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], - [0, 80, 100], [0, 0, 230], [119, 11, 32]] - - def __init__(self, **kwargs): - super(CityscapesDataset, self).__init__( - img_suffix='_leftImg8bit.png', - seg_map_suffix='_gtFine_labelTrainIds.png', - **kwargs) - - @staticmethod - def _convert_to_label_id(result): - """Convert trainId to id for cityscapes.""" - if isinstance(result, str): - result = np.load(result) - import cityscapesscripts.helpers.labels as CSLabels - result_copy = result.copy() - for trainId, label in CSLabels.trainId2label.items(): - result_copy[result == trainId] = label.id - - return result_copy - - def results2img(self, results, imgfile_prefix, to_label_id): - """Write the segmentation results to images. - - Args: - results (list[list | tuple | ndarray]): Testing results of the - dataset. - imgfile_prefix (str): The filename prefix of the png files. - If the prefix is "somepath/xxx", - the png files will be named "somepath/xxx.png". - to_label_id (bool): whether convert output to label_id for - submission - - Returns: - list[str: str]: result txt files which contains corresponding - semantic segmentation images. - """ - mmcv.mkdir_or_exist(imgfile_prefix) - result_files = [] - prog_bar = mmcv.ProgressBar(len(self)) - for idx in range(len(self)): - result = results[idx] - if to_label_id: - result = self._convert_to_label_id(result) - filename = self.img_infos[idx]['filename'] - basename = osp.splitext(osp.basename(filename))[0] - - png_filename = osp.join(imgfile_prefix, f'{basename}.png') - - output = Image.fromarray(result.astype(np.uint8)).convert('P') - import cityscapesscripts.helpers.labels as CSLabels - palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8) - for label_id, label in CSLabels.id2label.items(): - palette[label_id] = label.color - - output.putpalette(palette) - output.save(png_filename) - result_files.append(png_filename) - prog_bar.update() - - return result_files - - def format_results(self, results, imgfile_prefix=None, to_label_id=True): - """Format the results into dir (standard format for Cityscapes - evaluation). - - Args: - results (list): Testing results of the dataset. - imgfile_prefix (str | None): The prefix of images files. It - includes the file path and the prefix of filename, e.g., - "a/b/prefix". If not specified, a temp file will be created. - Default: None. - to_label_id (bool): whether convert output to label_id for - submission. Default: False - - Returns: - tuple: (result_files, tmp_dir), result_files is a list containing - the image paths, tmp_dir is the temporal directory created - for saving json/png files when img_prefix is not specified. 
- """ - - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: ' - f'{len(results)} != {len(self)}') - - if imgfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - imgfile_prefix = tmp_dir.name - else: - tmp_dir = None - result_files = self.results2img(results, imgfile_prefix, to_label_id) - - return result_files, tmp_dir - - def evaluate(self, - results, - metric='mIoU', - logger=None, - imgfile_prefix=None, - efficient_test=False): - """Evaluation in Cityscapes/default protocol. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | None | str): Logger used for printing - related information during evaluation. Default: None. - imgfile_prefix (str | None): The prefix of output image file, - for cityscapes evaluation only. It includes the file path and - the prefix of filename, e.g., "a/b/prefix". - If results are evaluated with cityscapes protocol, it would be - the prefix of output png files. The output files would be - png images under folder "a/b/prefix/xxx.png", where "xxx" is - the image name of cityscapes. If not specified, a temp file - will be created for evaluation. - Default: None. - - Returns: - dict[str, float]: Cityscapes/default metrics. - """ - - eval_results = dict() - metrics = metric.copy() if isinstance(metric, list) else [metric] - if 'cityscapes' in metrics: - eval_results.update( - self._evaluate_cityscapes(results, logger, imgfile_prefix)) - metrics.remove('cityscapes') - if len(metrics) > 0: - eval_results.update( - super(CityscapesDataset, - self).evaluate(results, metrics, logger, efficient_test)) - - return eval_results - - def _evaluate_cityscapes(self, results, logger, imgfile_prefix): - """Evaluation in Cityscapes protocol. - - Args: - results (list): Testing results of the dataset. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - imgfile_prefix (str | None): The prefix of output image file - - Returns: - dict[str: float]: Cityscapes evaluation results. 
- """ - try: - import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa - except ImportError: - raise ImportError('Please run "pip install cityscapesscripts" to ' - 'install cityscapesscripts first.') - msg = 'Evaluating in Cityscapes style' - if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - result_files, tmp_dir = self.format_results(results, imgfile_prefix) - - if tmp_dir is None: - result_dir = imgfile_prefix - else: - result_dir = tmp_dir.name - - eval_results = dict() - print_log(f'Evaluating results under {result_dir} ...', logger=logger) - - CSEval.args.evalInstLevelScore = True - CSEval.args.predictionPath = osp.abspath(result_dir) - CSEval.args.evalPixelAccuracy = True - CSEval.args.JSONOutput = False - - seg_map_list = [] - pred_list = [] - - # when evaluating with official cityscapesscripts, - # **_gtFine_labelIds.png is used - for seg_map in mmcv.scandir( - self.ann_dir, 'gtFine_labelIds.png', recursive=True): - seg_map_list.append(osp.join(self.ann_dir, seg_map)) - pred_list.append(CSEval.getPrediction(CSEval.args, seg_map)) - - eval_results.update( - CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args)) - - if tmp_dir is not None: - tmp_dir.cleanup() - - return eval_results diff --git a/spaces/SAAZIZI/SummarizeAV/transcription_service/transcriber.py b/spaces/SAAZIZI/SummarizeAV/transcription_service/transcriber.py deleted file mode 100644 index aa48c76ce98f7734c4ff5570c0d54412e6ee8537..0000000000000000000000000000000000000000 --- a/spaces/SAAZIZI/SummarizeAV/transcription_service/transcriber.py +++ /dev/null @@ -1,55 +0,0 @@ -import json - -import torch -import whisper - -from logger import logger - - -class YouTubeTranscriber: - def __init__(self, video_id, filename, output_path_youtube, output_path_transcription, device=None): - self.video_id = video_id - self.filename = filename - self.output_path_transcription = output_path_transcription - self.transcription = None - self.output_path_youtube = output_path_youtube - if not device: - self.device = "cuda" if torch.cuda.is_available() else "cpu" - - def transcribe_audio(self, model_name): - audio = whisper.load_audio(f"{self.output_path_youtube}/{self.filename}") - model = whisper.load_model(model_name, device=self.device) - self.transcription = whisper.transcribe(model, audio) - - def write_to_json(self): - with open(f"{self.output_path_transcription}/{self.video_id}.json", "w") as f: - json.dump(self.transcription, f) - logger.info(f"Transcription downloaded to {self.output_path_transcription}/{self.video_id}.json") - - def merge_segments(self, num_to_merge): - merged_segments = [] - segments = self.transcription["segments"] - for i in range(0, len(segments), num_to_merge): - merged_dict = {} - slice_ = segments[i: i + num_to_merge] - - # Merging the 'text' fields - merged_dict["text"] = " ".join(item["text"] for item in slice_) - - # Get the 'start' time from the first dictionary and the 'end' time from the last dictionary - merged_dict["start"] = int(slice_[0]["start"]) - merged_dict["end"] = int(slice_[-1]["end"]) - - merged_segments.append(merged_dict) - - self.transcription["merged_segments"] = merged_segments - - def run(self, number_to_merge=4, model_name="base"): - logger.info("transcribe_audio") - self.transcribe_audio(model_name=model_name) - - logger.info("merge_segments") - self.merge_segments(number_to_merge) - - logger.info("write_to_json") - self.write_to_json() diff --git 
a/spaces/Salesforce/EDICT/my_diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/spaces/Salesforce/EDICT/my_diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py deleted file mode 100644 index 4979d88feee933483ac49c5cf71eef590d8fb34c..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +++ /dev/null @@ -1,108 +0,0 @@ -import inspect -import warnings -from typing import Optional, Tuple, Union - -import torch - -from ...models import UNet2DModel, VQModel -from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput -from ...schedulers import DDIMScheduler - - -class LDMPipeline(DiffusionPipeline): - r""" - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Parameters: - vqvae ([`VQModel`]): - Vector-quantized (VQ) Model to encode and decode images to and from latent representations. - unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - [`DDIMScheduler`] is to be used in combination with `unet` to denoise the encoded image latens. - """ - - def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): - super().__init__() - scheduler = scheduler.set_format("pt") - self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) - - @torch.no_grad() - def __call__( - self, - batch_size: int = 1, - generator: Optional[torch.Generator] = None, - eta: float = 0.0, - num_inference_steps: int = 50, - output_type: Optional[str] = "pil", - return_dict: bool = True, - **kwargs, - ) -> Union[Tuple, ImagePipelineOutput]: - - r""" - Args: - batch_size (`int`, *optional*, defaults to 1): - Number of images to generate. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if - `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the - generated images. - """ - - if "torch_device" in kwargs: - device = kwargs.pop("torch_device") - warnings.warn( - "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0." - " Consider using `pipe.to(torch_device)` instead." 
- ) - - # Set device as before (to be removed in 0.3.0) - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - self.to(device) - - latents = torch.randn( - (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size), - generator=generator, - ) - latents = latents.to(self.device) - - self.scheduler.set_timesteps(num_inference_steps) - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - - extra_kwargs = {} - if accepts_eta: - extra_kwargs["eta"] = eta - - for t in self.progress_bar(self.scheduler.timesteps): - # predict the noise residual - noise_prediction = self.unet(latents, t).sample - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample - - # decode the image latents with the VAE - image = self.vqvae.decode(latents).sample - - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).numpy() - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_ddim.py b/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_ddim.py deleted file mode 100644 index ccfb0f7e648acc81750a98d317a03de715633588..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_diffusers/schedulers/scheduling_ddim.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2022 Stanford University Team and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion -# and https://github.com/hojonathanho/diffusion - -import math -from typing import Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from .scheduling_utils import SchedulerMixin, SchedulerOutput - - -def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. - - - Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to - prevent singularities. 
- - Returns: - betas (`np.ndarray`): the betas used by the scheduler to step the model outputs - """ - - def alpha_bar(time_step): - return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 - - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas, dtype=np.float64) - - -class DDIMScheduler(SchedulerMixin, ConfigMixin): - """ - Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising - diffusion probabilistic models (DDPMs) with non-Markovian guidance. - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and - [`~ConfigMixin.from_config`] functios. - - For more details, see the original paper: https://arxiv.org/abs/2010.02502 - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear`, `scaled_linear`, or `squaredcos_cap_v2`. - trained_betas (`np.ndarray`, optional): TODO - timestep_values (`np.ndarray`, optional): TODO - clip_sample (`bool`, default `True`): - option to clip predicted sample between -1 and 1 for numerical stability. - set_alpha_to_one (`bool`, default `True`): - if alpha for final step is 1 or the final alpha of the "non-previous" one. - tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays. - - """ - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[np.ndarray] = None, - timestep_values: Optional[np.ndarray] = None, - clip_sample: bool = True, - set_alpha_to_one: bool = True, - tensor_format: str = "pt", - ): - if trained_betas is not None: - self.betas = np.asarray(trained_betas) - if beta_schedule == "linear": - self.betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float64) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. - self.betas = np.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=np.float64) ** 2 - elif beta_schedule == "squaredcos_cap_v2": - # Glide cosine schedule - self.betas = betas_for_alpha_bar(num_train_timesteps) - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = np.cumprod(self.alphas, axis=0) - - # At every step in ddim, we are looking into the previous alphas_cumprod - # For the final step, there is no previous alphas_cumprod because we are already at 0 - # `set_alpha_to_one` decides whether we set this paratemer simply to one or - # whether we use the final alpha of the "non-previous" one. 
- self.final_alpha_cumprod = np.array(1.0) if set_alpha_to_one else self.alphas_cumprod[0] - - # setable values - self.num_inference_steps = None - self.timesteps = np.arange(0, num_train_timesteps)[::-1].copy() - - self.tensor_format = tensor_format - self.set_format(tensor_format=tensor_format) - - # print(self.alphas.shape) - - - def _get_variance(self, timestep, prev_timestep): - alpha_prod_t = self.alphas_cumprod[timestep] - alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod - beta_prod_t = 1 - alpha_prod_t - beta_prod_t_prev = 1 - alpha_prod_t_prev - - variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) - - return variance - - def set_timesteps(self, num_inference_steps: int, offset: int = 0): - """ - Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - offset (`int`): TODO - """ - self.num_inference_steps = num_inference_steps - if num_inference_steps <= 1000: - self.timesteps = np.arange( - 0, self.config.num_train_timesteps, self.config.num_train_timesteps // self.num_inference_steps - )[::-1].copy() - else: - print("Hitting new logic, allowing fractional timesteps") - self.timesteps = np.linspace( - 0, self.config.num_train_timesteps-1, self.num_inference_steps, endpoint=True - )[::-1].copy() - self.timesteps += offset - self.set_format(tensor_format=self.tensor_format) - - def step( - self, - model_output: Union[torch.FloatTensor, np.ndarray], - timestep: int, - sample: Union[torch.FloatTensor, np.ndarray], - eta: float = 0.0, - use_clipped_model_output: bool = False, - generator=None, - return_dict: bool = True, - ) -> Union[SchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor` or `np.ndarray`): - current instance of sample being created by diffusion process. - eta (`float`): weight of noise for added noise in diffusion step. - use_clipped_model_output (`bool`): TODO - generator: random number generator. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - - """ - if self.num_inference_steps is None: - raise ValueError( - "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" - ) - - # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf - # Ideally, read DDIM paper in-detail understanding - - # Notation ( -> - # - pred_noise_t -> e_theta(x_t, t) - # - pred_original_sample -> f_theta(x_t, t) or x_0 - # - std_dev_t -> sigma_t - # - eta -> η - # - pred_sample_direction -> "direction pointingc to x_t" - # - pred_prev_sample -> "x_t-1" - - # 1. get previous step value (=t-1) - prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps - - # 2. 
compute alphas, betas - alpha_prod_t = self.alphas_cumprod[timestep] - alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod - beta_prod_t = 1 - alpha_prod_t - - # 3. compute predicted original sample from predicted noise also called - # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) - - # 4. Clip "predicted x_0" - if self.config.clip_sample: - pred_original_sample = self.clip(pred_original_sample, -1, 1) - - # 5. compute variance: "sigma_t(η)" -> see formula (16) - # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) - variance = self._get_variance(timestep, prev_timestep) - std_dev_t = eta * variance ** (0.5) - - if use_clipped_model_output: - # the model_output is always re-derived from the clipped x_0 in Glide - model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) - - # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output - - # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction - - if eta > 0: - device = model_output.device if torch.is_tensor(model_output) else "cpu" - noise = torch.randn(model_output.shape, generator=generator).to(device) - variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise - - if not torch.is_tensor(model_output): - variance = variance.numpy() - - prev_sample = prev_sample + variance - - if not return_dict: - return (prev_sample,) - - return SchedulerOutput(prev_sample=prev_sample) - - def add_noise( - self, - original_samples: Union[torch.FloatTensor, np.ndarray], - noise: Union[torch.FloatTensor, np.ndarray], - timesteps: Union[torch.IntTensor, np.ndarray], - ) -> Union[torch.FloatTensor, np.ndarray]: - sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5 - sqrt_alpha_prod = self.match_shape(sqrt_alpha_prod, original_samples) - sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5 - sqrt_one_minus_alpha_prod = self.match_shape(sqrt_one_minus_alpha_prod, original_samples) - - noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/Salesforce/EDICT/my_half_diffusers/commands/__init__.py b/spaces/Salesforce/EDICT/my_half_diffusers/commands/__init__.py deleted file mode 100644 index 902bd46cedc6f2df785c1dc5d2e6bd8ef7c69ca6..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_half_diffusers/commands/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from abc import ABC, abstractmethod -from argparse import ArgumentParser - - -class BaseDiffusersCLICommand(ABC): - @staticmethod - @abstractmethod - def register_subcommand(parser: ArgumentParser): - raise NotImplementedError() - - @abstractmethod - def run(self): - raise NotImplementedError() diff --git a/spaces/SameerR007/Movie_Recommendation_updated/README.md b/spaces/SameerR007/Movie_Recommendation_updated/README.md deleted file mode 100644 index 0a5e124da9bd663bb0c6141f0a95f713ad9e9afb..0000000000000000000000000000000000000000 --- a/spaces/SameerR007/Movie_Recommendation_updated/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Movie Recommendation Updated -emoji: 📚 -colorFrom: green -colorTo: red -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ServerX/PorcoDiaz/MDXNet.py b/spaces/ServerX/PorcoDiaz/MDXNet.py deleted file mode 100644 index 9b7eb43844ad0d4f9ce61287ccf9a8a4206d3853..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/MDXNet.py +++ /dev/null @@ -1,272 +0,0 @@ -import soundfile as sf -import torch, pdb, os, warnings, librosa -import numpy as np -import onnxruntime as ort -from tqdm import tqdm -import torch - -dim_c = 4 - - -class Conv_TDF_net_trim: - def __init__( - self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024 - ): - super(Conv_TDF_net_trim, self).__init__() - - self.dim_f = dim_f - self.dim_t = 2**dim_t - self.n_fft = n_fft - self.hop = hop - self.n_bins = self.n_fft // 2 + 1 - self.chunk_size = hop * (self.dim_t - 1) - self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to( - device - ) - self.target_name = target_name - self.blender = "blender" in model_name - - out_c = dim_c * 4 if target_name == "*" else dim_c - self.freq_pad = torch.zeros( - [1, out_c, self.n_bins - self.dim_f, self.dim_t] - ).to(device) - - self.n = L // 2 - - def stft(self, x): - x = x.reshape([-1, self.chunk_size]) - x = torch.stft( - x, - n_fft=self.n_fft, - hop_length=self.hop, - window=self.window, - center=True, - return_complex=True, - ) - x = torch.view_as_real(x) - x = x.permute([0, 3, 1, 2]) - x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape( - [-1, dim_c, self.n_bins, self.dim_t] - ) - return x[:, :, : self.dim_f] - - def istft(self, x, freq_pad=None): - freq_pad = ( - self.freq_pad.repeat([x.shape[0], 1, 1, 1]) - if freq_pad is None - else freq_pad - ) - x = torch.cat([x, freq_pad], -2) - c = 4 * 2 if self.target_name == "*" else 2 - x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape( - [-1, 2, self.n_bins, self.dim_t] - ) - x = x.permute([0, 2, 3, 1]) - x = x.contiguous() - x = torch.view_as_complex(x) - x = torch.istft( - x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True - ) - return x.reshape([-1, c, self.chunk_size]) - - -def get_models(device, dim_f, dim_t, n_fft): - return Conv_TDF_net_trim( - device=device, - model_name="Conv-TDF", - target_name="vocals", - L=11, - dim_f=dim_f, - dim_t=dim_t, - n_fft=n_fft, - ) - - -warnings.filterwarnings("ignore") -cpu = torch.device("cpu") -if torch.cuda.is_available(): - device = torch.device("cuda:0") -elif torch.backends.mps.is_available(): - device = torch.device("mps") -else: - device = torch.device("cpu") - - -class Predictor: - def __init__(self, args): - self.args = args - self.model_ = get_models( - device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft - ) - 
self.model = ort.InferenceSession( - os.path.join(args.onnx, self.model_.target_name + ".onnx"), - providers=["CUDAExecutionProvider", "CPUExecutionProvider"], - ) - print("onnx load done") - - def demix(self, mix): - samples = mix.shape[-1] - margin = self.args.margin - chunk_size = self.args.chunks * 44100 - assert not margin == 0, "margin cannot be zero!" - if margin > chunk_size: - margin = chunk_size - - segmented_mix = {} - - if self.args.chunks == 0 or samples < chunk_size: - chunk_size = samples - - counter = -1 - for skip in range(0, samples, chunk_size): - counter += 1 - - s_margin = 0 if counter == 0 else margin - end = min(skip + chunk_size + margin, samples) - - start = skip - s_margin - - segmented_mix[skip] = mix[:, start:end].copy() - if end == samples: - break - - sources = self.demix_base(segmented_mix, margin_size=margin) - """ - mix:(2,big_sample) - segmented_mix:offset->(2,small_sample) - sources:(1,2,big_sample) - """ - return sources - - def demix_base(self, mixes, margin_size): - chunked_sources = [] - progress_bar = tqdm(total=len(mixes)) - progress_bar.set_description("Processing") - for mix in mixes: - cmix = mixes[mix] - sources = [] - n_sample = cmix.shape[1] - model = self.model_ - trim = model.n_fft // 2 - gen_size = model.chunk_size - 2 * trim - pad = gen_size - n_sample % gen_size - mix_p = np.concatenate( - (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1 - ) - mix_waves = [] - i = 0 - while i < n_sample + pad: - waves = np.array(mix_p[:, i : i + model.chunk_size]) - mix_waves.append(waves) - i += gen_size - mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu) - with torch.no_grad(): - _ort = self.model - spek = model.stft(mix_waves) - if self.args.denoise: - spec_pred = ( - -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5 - + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5 - ) - tar_waves = model.istft(torch.tensor(spec_pred)) - else: - tar_waves = model.istft( - torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0]) - ) - tar_signal = ( - tar_waves[:, :, trim:-trim] - .transpose(0, 1) - .reshape(2, -1) - .numpy()[:, :-pad] - ) - - start = 0 if mix == 0 else margin_size - end = None if mix == list(mixes.keys())[::-1][0] else -margin_size - if margin_size == 0: - end = None - sources.append(tar_signal[:, start:end]) - - progress_bar.update(1) - - chunked_sources.append(sources) - _sources = np.concatenate(chunked_sources, axis=-1) - # del self.model - progress_bar.close() - return _sources - - def prediction(self, m, vocal_root, others_root, format): - os.makedirs(vocal_root, exist_ok=True) - os.makedirs(others_root, exist_ok=True) - basename = os.path.basename(m) - mix, rate = librosa.load(m, mono=False, sr=44100) - if mix.ndim == 1: - mix = np.asfortranarray([mix, mix]) - mix = mix.T - sources = self.demix(mix.T) - opt = sources[0].T - if format in ["wav", "flac"]: - sf.write( - "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate - ) - sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate) - else: - path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename) - path_other = "%s/%s_others.wav" % (others_root, basename) - sf.write(path_vocal, mix - opt, rate) - sf.write(path_other, opt, rate) - if os.path.exists(path_vocal): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path_vocal, path_vocal[:-4] + ".%s" % format) - ) - if os.path.exists(path_other): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path_other, path_other[:-4] + ".%s" % format) - ) - - 
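# Illustrative sketch of the chunk/margin bookkeeping used by Predictor.demix
# and demix_base above: each chunk is read with `margin` extra samples on each
# side, and those borrowed margins are trimmed again after inference so the
# chunks concatenate without seams. The sizes below (10 samples, chunk_size=4,
# margin=1) are made-up toy values chosen only to make the slicing visible;
# the real code uses chunks * 44100 samples and a 44100-sample margin.
import numpy as np

samples, chunk_size, margin = 10, 4, 1
mix = np.arange(2 * samples).reshape(2, samples)  # stand-in for stereo audio

segments = {}
counter = -1
for skip in range(0, samples, chunk_size):
    counter += 1
    s_margin = 0 if counter == 0 else margin        # no left margin on the first chunk
    end = min(skip + chunk_size + margin, samples)  # right margin, clipped at the end
    segments[skip] = mix[:, skip - s_margin:end]
    if end == samples:
        break

# segments[0] holds columns 0..4, segments[4] holds 3..8, segments[8] holds 7..9.
# Trimming the borrowed margins before concatenation (as demix_base does)
# recovers the original signal exactly:
keys = list(segments)
trimmed = [
    seg[:, (0 if k == keys[0] else margin):(None if k == keys[-1] else -margin)]
    for k, seg in segments.items()
]
restored = np.concatenate(trimmed, axis=-1)
assert np.array_equal(restored, mix)  # the trimmed chunks tile the input with no overlap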
-class MDXNetDereverb: - def __init__(self, chunks): - self.onnx = "uvr5_weights/onnx_dereverb_By_FoxJoy" - self.shifts = 10 #'Predict with randomised equivariant stabilisation' - self.mixing = "min_mag" # ['default','min_mag','max_mag'] - self.chunks = chunks - self.margin = 44100 - self.dim_t = 9 - self.dim_f = 3072 - self.n_fft = 6144 - self.denoise = True - self.pred = Predictor(self) - - def _path_audio_(self, input, vocal_root, others_root, format): - self.pred.prediction(input, vocal_root, others_root, format) - - -if __name__ == "__main__": - dereverb = MDXNetDereverb(15) - from time import time as ttime - - t0 = ttime() - dereverb._path_audio_( - "雪雪伴奏对消HP5.wav", - "vocal", - "others", - ) - t1 = ttime() - print(t1 - t0) - - -""" - -runtime\python.exe MDXNet.py - -6G: -15/9:0.8G->6.8G -14:0.8G->6.5G -25:炸 - -half15:0.7G->6.6G,22.69s -fp32-15:0.7G->6.6G,20.85s - -""" diff --git a/spaces/ServerX/PorcoDiaz/audioEffects.py b/spaces/ServerX/PorcoDiaz/audioEffects.py deleted file mode 100644 index 1830b19e1a5e3ec1f431388d8444ef3a2c9ed91f..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/audioEffects.py +++ /dev/null @@ -1,37 +0,0 @@ -from pedalboard import Pedalboard, Compressor, Reverb, NoiseGate -from pedalboard.io import AudioFile -import sys -import os -now_dir = os.getcwd() -sys.path.append(now_dir) -from i18n import I18nAuto -i18n = I18nAuto() -from pydub import AudioSegment -import numpy as np -import soundfile as sf -from pydub.playback import play - -def process_audio(input_path, output_path, reverb_enabled, compressor_enabled, noise_gate_enabled, ): - print(reverb_enabled) - print(compressor_enabled) - print(noise_gate_enabled) - effects = [] - if reverb_enabled: - effects.append(Reverb(room_size=0.01)) - if compressor_enabled: - effects.append(Compressor(threshold_db=-10, ratio=25)) - if noise_gate_enabled: - effects.append(NoiseGate(threshold_db=-16, ratio=1.5, release_ms=250)) - - board = Pedalboard(effects) - - with AudioFile(input_path) as f: - with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o: - while f.tell() < f.frames: - chunk = f.read(f.samplerate) - effected = board(chunk, f.samplerate, reset=False) - o.write(effected) - - result = i18n("Processed audio saved at: ") + output_path - print(result) - return output_path \ No newline at end of file diff --git a/spaces/ShreyashNadage/InvestmentCopilot/NewsAnalyzer.py b/spaces/ShreyashNadage/InvestmentCopilot/NewsAnalyzer.py deleted file mode 100644 index f2914a5351b4a0ca88b757f547064a9a237c4b09..0000000000000000000000000000000000000000 --- a/spaces/ShreyashNadage/InvestmentCopilot/NewsAnalyzer.py +++ /dev/null @@ -1,29 +0,0 @@ -from pygooglenews import GoogleNews -from transformers import BertTokenizer, BertForSequenceClassification -from transformers import pipeline -import pandas as pd - -finbert = BertForSequenceClassification.from_pretrained('yiyanghkust/finbert-tone',num_labels=3) -tokenizer = BertTokenizer.from_pretrained('yiyanghkust/finbert-tone') -nlp = pipeline("sentiment-analysis", model=finbert, tokenizer=tokenizer) - -num_top_headlines = 10 - -gn = GoogleNews(lang='en',country='IN') - -def get_headlines(searchterm='Nifty'): - test = gn.search(searchterm, when='5d') - newslist = [i['title'] for i in test['entries']] - return newslist[:num_top_headlines] - -def get_sentimental_analysis(newslist): - results = nlp(newslist) - df = pd.DataFrame({'headlines': newslist, \ - 'results': [i['label'] for i in results]}) - return 
df.results.value_counts().sort_values(ascending=False).index[0] - -def get_nifty_sentiment(): - newslist = get_headlines() - return get_sentimental_analysis(newslist) - - diff --git a/spaces/SpacesExamples/test-docker-go/main.go b/spaces/SpacesExamples/test-docker-go/main.go deleted file mode 100644 index 0881626741bab11a0858f356db203dd650379cfa..0000000000000000000000000000000000000000 --- a/spaces/SpacesExamples/test-docker-go/main.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - "net/url" -) - -func main() { - http.HandleFunc("/", HelloServer) - http.ListenAndServe(":8080", nil) -} - -func HelloServer(w http.ResponseWriter, r *http.Request) { - m, _ := url.ParseQuery(r.URL.RawQuery) - fmt.Fprintf(w, "Hello, %s!", m["q"]) -} diff --git a/spaces/Stearns/Soar/pysoarlib/WMInterface.py b/spaces/Stearns/Soar/pysoarlib/WMInterface.py deleted file mode 100644 index e814bf9f13d266ebdfd70c68126c7782b3598fb0..0000000000000000000000000000000000000000 --- a/spaces/Stearns/Soar/pysoarlib/WMInterface.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -This module defines a utility interface called WMInterface -which defines a standard interface for adding and removing things from working memory -""" - -class WMInterface(object): - """ An interface standardizing how to add/remove items from working memory """ - - def __init__(self): - self.added = False - - def is_added(self): - """ Returns true if the wme is currently in soar's working memory """ - return self.added - - def add_to_wm(self, parent_id): - """ Creates a structure in working memory rooted at the given parent_id """ - if self.added: - self._remove_from_wm_impl() - self._add_to_wm_impl(parent_id) - self.added = True - - def update_wm(self, parent_id = None): - """ Updates the structure in Soar's working memory - It will also add it to wm if parent_id is not None """ - if self.added: - self._update_wm_impl() - elif parent_id: - self._add_to_wm_impl(parent_id) - self.added = True - - def remove_from_wm(self): - """ Removes the structure from Soar's working memory """ - if not self.added: - return - self._remove_from_wm_impl() - self.added = False - - - ### Internal Methods - To be implemented by derived classes - - def _add_to_wm_impl(self, parent_id): - """ Method to implement in derived class - add to working memory """ - pass - - def _update_wm_impl(self): - """ Method to implement in derived class - update working memory """ - pass - - def _remove_from_wm_impl(self): - """ Method to implement in derived class - remove from working memory """ - pass - diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiofiles/threadpool/utils.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiofiles/threadpool/utils.py deleted file mode 100644 index f429877cd0c139616b7a7a8e951af86c16c74796..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiofiles/threadpool/utils.py +++ /dev/null @@ -1,74 +0,0 @@ -import functools -from types import coroutine - - -def delegate_to_executor(*attrs): - def cls_builder(cls): - for attr_name in attrs: - setattr(cls, attr_name, _make_delegate_method(attr_name)) - return cls - - return cls_builder - - -def proxy_method_directly(*attrs): - def cls_builder(cls): - for attr_name in attrs: - setattr(cls, attr_name, _make_proxy_method(attr_name)) - return cls - - return cls_builder - - -def proxy_property_directly(*attrs): - def cls_builder(cls): - for attr_name in attrs: - setattr(cls, attr_name, 
_make_proxy_property(attr_name)) - return cls - - return cls_builder - - -def cond_delegate_to_executor(*attrs): - def cls_builder(cls): - for attr_name in attrs: - setattr(cls, attr_name, _make_cond_delegate_method(attr_name)) - return cls - - return cls_builder - - -def _make_delegate_method(attr_name): - @coroutine - def method(self, *args, **kwargs): - cb = functools.partial(getattr(self._file, attr_name), *args, **kwargs) - return (yield from self._loop.run_in_executor(self._executor, cb)) - - return method - - -def _make_proxy_method(attr_name): - def method(self, *args, **kwargs): - return getattr(self._file, attr_name)(*args, **kwargs) - - return method - - -def _make_proxy_property(attr_name): - def proxy_property(self): - return getattr(self._file, attr_name) - - return property(proxy_property) - - -def _make_cond_delegate_method(attr_name): - """For spooled temp files, delegate only if rolled to file object""" - - async def method(self, *args, **kwargs): - if self._file._rolled: - cb = functools.partial(getattr(self._file, attr_name), *args, **kwargs) - return await self._loop.run_in_executor(self._executor, cb) - else: - return getattr(self._file, attr_name)(*args, **kwargs) - - return method diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_plugin_utils.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_plugin_utils.py deleted file mode 100644 index 0cd0d76152c90f33fe1d18949ae794c9567abbab..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_plugin_utils.py +++ /dev/null @@ -1,91 +0,0 @@ -import types - -from _pydev_bundle import pydev_log -from _pydevd_bundle import pydevd_trace_api - -try: - from pydevd_plugins import django_debug -except: - django_debug = None - pydev_log.debug('Unable to load django_debug plugin') - -try: - from pydevd_plugins import jinja2_debug -except: - jinja2_debug = None - pydev_log.debug('Unable to load jinja2_debug plugin') - -def load_plugins(): - plugins = [] - if django_debug is not None: - plugins.append(django_debug) - - if jinja2_debug is not None: - plugins.append(jinja2_debug) - return plugins - - -def bind_func_to_method(func, obj, method_name): - bound_method = types.MethodType(func, obj) - - setattr(obj, method_name, bound_method) - return bound_method - - -class PluginManager(object): - - def __init__(self, main_debugger): - self.plugins = load_plugins() - self.active_plugins = [] - self.main_debugger = main_debugger - self.rebind_methods() - - def add_breakpoint(self, func_name, *args, **kwargs): - # add breakpoint for plugin and remember which plugin to use in tracing - for plugin in self.plugins: - if hasattr(plugin, func_name): - func = getattr(plugin, func_name) - result = func(self, *args, **kwargs) - if result: - self.activate(plugin) - - return result - return None - - def activate(self, plugin): - if plugin not in self.active_plugins: - self.active_plugins.append(plugin) - self.rebind_methods() - - def rebind_methods(self): - if len(self.active_plugins) == 0: - self.bind_functions(pydevd_trace_api, getattr, pydevd_trace_api) - elif len(self.active_plugins) == 1: - self.bind_functions(pydevd_trace_api, getattr, self.active_plugins[0]) - else: - self.bind_functions(pydevd_trace_api, create_dispatch, self.active_plugins) - - def bind_functions(self, interface, function_factory, arg): - for name in dir(interface): 
- func = function_factory(arg, name) - if type(func) == types.FunctionType: - bind_func_to_method(func, self, name) - - -def create_dispatch(obj, name): - def dispatch(self, *args, **kwargs): - result = None - for p in self.active_plugins: - r = getattr(p, name)(self, *args, **kwargs) - if not result: - result = r - return result - return dispatch - - - - - - - - diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv_custom/__init__.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv_custom/__init__.py deleted file mode 100644 index 4b958738b9fd93bfcec239c550df1d9a44b8c536..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv_custom/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- - -from .checkpoint import load_checkpoint - -__all__ = ['load_checkpoint'] \ No newline at end of file diff --git a/spaces/TEnngal/TEnngal/README.md b/spaces/TEnngal/TEnngal/README.md deleted file mode 100644 index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/TEnngal/README.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: bingo -emoji: 😊 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
- -# Bingo - -Bingo,一个让你呼吸顺畅 New Bing。 - -高度还原 New Bing 网页版的主要操作,国内可用,兼容绝大多数微软 Bing AI 的功能,可自行部署使用。 - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Gthub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -问题反馈请前往 https://github.com/weaigc/bingo/issues -
- - diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/commands/download.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/commands/download.py deleted file mode 100644 index 54247a78a654187206cd17a403913c6257ffcc7d..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/commands/download.py +++ /dev/null @@ -1,147 +0,0 @@ -import logging -import os -from optparse import Values -from typing import List - -from pip._internal.cli import cmdoptions -from pip._internal.cli.cmdoptions import make_target_python -from pip._internal.cli.req_command import RequirementCommand, with_cleanup -from pip._internal.cli.status_codes import SUCCESS -from pip._internal.operations.build.build_tracker import get_build_tracker -from pip._internal.req.req_install import check_legacy_setup_py_options -from pip._internal.utils.misc import ensure_dir, normalize_path, write_output -from pip._internal.utils.temp_dir import TempDirectory - -logger = logging.getLogger(__name__) - - -class DownloadCommand(RequirementCommand): - """ - Download packages from: - - - PyPI (and other indexes) using requirement specifiers. - - VCS project urls. - - Local project directories. - - Local or remote source archives. - - pip also supports downloading from "requirements files", which provide - an easy way to specify a whole environment to be downloaded. - """ - - usage = """ - %prog [options] [package-index-options] ... - %prog [options] -r [package-index-options] ... - %prog [options] ... - %prog [options] ... - %prog [options] ...""" - - def add_options(self) -> None: - self.cmd_opts.add_option(cmdoptions.constraints()) - self.cmd_opts.add_option(cmdoptions.requirements()) - self.cmd_opts.add_option(cmdoptions.no_deps()) - self.cmd_opts.add_option(cmdoptions.global_options()) - self.cmd_opts.add_option(cmdoptions.no_binary()) - self.cmd_opts.add_option(cmdoptions.only_binary()) - self.cmd_opts.add_option(cmdoptions.prefer_binary()) - self.cmd_opts.add_option(cmdoptions.src()) - self.cmd_opts.add_option(cmdoptions.pre()) - self.cmd_opts.add_option(cmdoptions.require_hashes()) - self.cmd_opts.add_option(cmdoptions.progress_bar()) - self.cmd_opts.add_option(cmdoptions.no_build_isolation()) - self.cmd_opts.add_option(cmdoptions.use_pep517()) - self.cmd_opts.add_option(cmdoptions.no_use_pep517()) - self.cmd_opts.add_option(cmdoptions.check_build_deps()) - self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) - - self.cmd_opts.add_option( - "-d", - "--dest", - "--destination-dir", - "--destination-directory", - dest="download_dir", - metavar="dir", - default=os.curdir, - help="Download packages into .", - ) - - cmdoptions.add_target_python_options(self.cmd_opts) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, self.cmd_opts) - - @with_cleanup - def run(self, options: Values, args: List[str]) -> int: - options.ignore_installed = True - # editable doesn't really make sense for `pip download`, but the bowels - # of the RequirementSet code require that property. 
- options.editables = [] - - cmdoptions.check_dist_restriction(options) - - options.download_dir = normalize_path(options.download_dir) - ensure_dir(options.download_dir) - - session = self.get_default_session(options) - - target_python = make_target_python(options) - finder = self._build_package_finder( - options=options, - session=session, - target_python=target_python, - ignore_requires_python=options.ignore_requires_python, - ) - - build_tracker = self.enter_context(get_build_tracker()) - - directory = TempDirectory( - delete=not options.no_clean, - kind="download", - globally_managed=True, - ) - - reqs = self.get_requirements(args, options, finder, session) - check_legacy_setup_py_options(options, reqs) - - preparer = self.make_requirement_preparer( - temp_build_dir=directory, - options=options, - build_tracker=build_tracker, - session=session, - finder=finder, - download_dir=options.download_dir, - use_user_site=False, - verbosity=self.verbosity, - ) - - resolver = self.make_resolver( - preparer=preparer, - finder=finder, - options=options, - ignore_requires_python=options.ignore_requires_python, - use_pep517=options.use_pep517, - py_version_info=options.python_version, - ) - - self.trace_basic_info(finder) - - requirement_set = resolver.resolve(reqs, check_supported_wheels=True) - - downloaded: List[str] = [] - for req in requirement_set.requirements.values(): - if req.satisfied_by is None: - assert req.name is not None - preparer.save_linked_requirement(req) - downloaded.append(req.name) - - preparer.prepare_linked_requirements_more(requirement_set.requirements.values()) - requirement_set.warn_legacy_versions_and_specifiers() - - if downloaded: - write_output("Successfully downloaded %s", " ".join(downloaded)) - - return SUCCESS diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/_internal_utils.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/_internal_utils.py deleted file mode 100644 index f2cf635e2937ee9b123a1498c5c5f723a6e20084..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/_internal_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -requests._internal_utils -~~~~~~~~~~~~~~ - -Provides utility functions that are consumed internally by Requests -which depend on extremely few external helpers (such as compat) -""" -import re - -from .compat import builtin_str - -_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") -_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") -_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") -_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") - -_HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR) -_HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE) -HEADER_VALIDATORS = { - bytes: _HEADER_VALIDATORS_BYTE, - str: _HEADER_VALIDATORS_STR, -} - - -def to_native_string(string, encoding="ascii"): - """Given a string object, regardless of type, returns a representation of - that string in the native string type, encoding and decoding where - necessary. This assumes ASCII unless told otherwise. - """ - if isinstance(string, builtin_str): - out = string - else: - out = string.decode(encoding) - - return out - - -def unicode_is_ascii(u_string): - """Determine if unicode string only contains ASCII characters. - - :param str u_string: unicode string to check. 
Must be unicode - and not Python 2 `str`. - :rtype: bool - """ - assert isinstance(u_string, str) - try: - u_string.encode("ascii") - return True - except UnicodeEncodeError: - return False diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/msvc.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/msvc.py deleted file mode 100644 index 4a08dffe36eea0f1fbb9bad2fa9a106a6e13adc9..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/msvc.py +++ /dev/null @@ -1,1690 +0,0 @@ -""" -Improved support for Microsoft Visual C++ compilers. - -Known supported compilers: --------------------------- -Microsoft Visual C++ 14.X: - Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) - Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) - Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64) - -This may also support compilers shipped with compatible Visual Studio versions. -""" - -import json -from io import open -from os import listdir, pathsep -from os.path import join, isfile, isdir, dirname -from subprocess import CalledProcessError -import contextlib -import platform -import itertools -import subprocess -import distutils.errors -from setuptools.extern.more_itertools import unique_everseen - -if platform.system() == 'Windows': - import winreg - from os import environ -else: - # Mock winreg and environ so the module can be imported on this platform. - - class winreg: - HKEY_USERS = None - HKEY_CURRENT_USER = None - HKEY_LOCAL_MACHINE = None - HKEY_CLASSES_ROOT = None - - environ = dict() - - -def _msvc14_find_vc2015(): - """Python 3.8 "distutils/_msvccompiler.py" backport""" - try: - key = winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, - r"Software\Microsoft\VisualStudio\SxS\VC7", - 0, - winreg.KEY_READ | winreg.KEY_WOW64_32KEY - ) - except OSError: - return None, None - - best_version = 0 - best_dir = None - with key: - for i in itertools.count(): - try: - v, vc_dir, vt = winreg.EnumValue(key, i) - except OSError: - break - if v and vt == winreg.REG_SZ and isdir(vc_dir): - try: - version = int(float(v)) - except (ValueError, TypeError): - continue - if version >= 14 and version > best_version: - best_version, best_dir = version, vc_dir - return best_version, best_dir - - -def _msvc14_find_vc2017(): - """Python 3.8 "distutils/_msvccompiler.py" backport - - Returns "15, path" based on the result of invoking vswhere.exe - If no install is found, returns "None, None" - - The version is returned to avoid unnecessarily changing the function - result. It may be ignored when the path is not None. - - If vswhere.exe is not available, by definition, VS 2017 is not - installed. 
- """ - root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles") - if not root: - return None, None - - suitable_components = ( - "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", - "Microsoft.VisualStudio.Workload.WDExpress", - ) - - for component in suitable_components: - # Workaround for `-requiresAny` (only available on VS 2017 > 15.6) - with contextlib.suppress(CalledProcessError, OSError, UnicodeDecodeError): - path = subprocess.check_output([ - join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), - "-latest", - "-prerelease", - "-requires", component, - "-property", "installationPath", - "-products", "*", - ]).decode(encoding="mbcs", errors="strict").strip() - - path = join(path, "VC", "Auxiliary", "Build") - if isdir(path): - return 15, path - - return None, None # no suitable component found - - -PLAT_SPEC_TO_RUNTIME = { - 'x86': 'x86', - 'x86_amd64': 'x64', - 'x86_arm': 'arm', - 'x86_arm64': 'arm64' -} - - -def _msvc14_find_vcvarsall(plat_spec): - """Python 3.8 "distutils/_msvccompiler.py" backport""" - _, best_dir = _msvc14_find_vc2017() - vcruntime = None - - if plat_spec in PLAT_SPEC_TO_RUNTIME: - vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec] - else: - vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86' - - if best_dir: - vcredist = join(best_dir, "..", "..", "redist", "MSVC", "**", - vcruntime_plat, "Microsoft.VC14*.CRT", - "vcruntime140.dll") - try: - import glob - vcruntime = glob.glob(vcredist, recursive=True)[-1] - except (ImportError, OSError, LookupError): - vcruntime = None - - if not best_dir: - best_version, best_dir = _msvc14_find_vc2015() - if best_version: - vcruntime = join(best_dir, 'redist', vcruntime_plat, - "Microsoft.VC140.CRT", "vcruntime140.dll") - - if not best_dir: - return None, None - - vcvarsall = join(best_dir, "vcvarsall.bat") - if not isfile(vcvarsall): - return None, None - - if not vcruntime or not isfile(vcruntime): - vcruntime = None - - return vcvarsall, vcruntime - - -def _msvc14_get_vc_env(plat_spec): - """Python 3.8 "distutils/_msvccompiler.py" backport""" - if "DISTUTILS_USE_SDK" in environ: - return { - key.lower(): value - for key, value in environ.items() - } - - vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec) - if not vcvarsall: - raise distutils.errors.DistutilsPlatformError( - "Unable to find vcvarsall.bat" - ) - - try: - out = subprocess.check_output( - 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec), - stderr=subprocess.STDOUT, - ).decode('utf-16le', errors='replace') - except subprocess.CalledProcessError as exc: - raise distutils.errors.DistutilsPlatformError( - "Error executing {}".format(exc.cmd) - ) from exc - - env = { - key.lower(): value - for key, _, value in - (line.partition('=') for line in out.splitlines()) - if key and value - } - - if vcruntime: - env['py_vcruntime_redist'] = vcruntime - return env - - -def msvc14_get_vc_env(plat_spec): - """ - Patched "distutils._msvccompiler._get_vc_env" for support extra - Microsoft Visual C++ 14.X compilers. - - Set environment without use of "vcvarsall.bat". - - Parameters - ---------- - plat_spec: str - Target architecture. - - Return - ------ - dict - environment - """ - - # Always use backport from CPython 3.8 - try: - return _msvc14_get_vc_env(plat_spec) - except distutils.errors.DistutilsPlatformError as exc: - _augment_exception(exc, 14.0) - raise - - -def _augment_exception(exc, version, arch=''): - """ - Add details to the exception message to help guide the user - as to what action will resolve it. 
- """ - # Error if MSVC++ directory not found or environment not set - message = exc.args[0] - - if "vcvarsall" in message.lower() or "visual c" in message.lower(): - # Special error message if MSVC++ not installed - tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.' - message = tmpl.format(**locals()) - msdownload = 'www.microsoft.com/download/details.aspx?id=%d' - if version == 9.0: - if arch.lower().find('ia64') > -1: - # For VC++ 9.0, if IA64 support is needed, redirect user - # to Windows SDK 7.0. - # Note: No download link available from Microsoft. - message += ' Get it with "Microsoft Windows SDK 7.0"' - else: - # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : - # This redirection link is maintained by Microsoft. - # Contact vspython@microsoft.com if it needs updating. - message += ' Get it from http://aka.ms/vcpython27' - elif version == 10.0: - # For VC++ 10.0 Redirect user to Windows SDK 7.1 - message += ' Get it with "Microsoft Windows SDK 7.1": ' - message += msdownload % 8279 - elif version >= 14.0: - # For VC++ 14.X Redirect user to latest Visual C++ Build Tools - message += (' Get it with "Microsoft C++ Build Tools": ' - r'https://visualstudio.microsoft.com' - r'/visual-cpp-build-tools/') - - exc.args = (message, ) - - -class PlatformInfo: - """ - Current and Target Architectures information. - - Parameters - ---------- - arch: str - Target architecture. - """ - current_cpu = environ.get('processor_architecture', '').lower() - - def __init__(self, arch): - self.arch = arch.lower().replace('x64', 'amd64') - - @property - def target_cpu(self): - """ - Return Target CPU architecture. - - Return - ------ - str - Target CPU - """ - return self.arch[self.arch.find('_') + 1:] - - def target_is_x86(self): - """ - Return True if target CPU is x86 32 bits.. - - Return - ------ - bool - CPU is x86 32 bits - """ - return self.target_cpu == 'x86' - - def current_is_x86(self): - """ - Return True if current CPU is x86 32 bits.. - - Return - ------ - bool - CPU is x86 32 bits - """ - return self.current_cpu == 'x86' - - def current_dir(self, hidex86=False, x64=False): - """ - Current platform specific subfolder. - - Parameters - ---------- - hidex86: bool - return '' and not '\x86' if architecture is x86. - x64: bool - return '\x64' and not '\amd64' if architecture is amd64. - - Return - ------ - str - subfolder: '\target', or '' (see hidex86 parameter) - """ - return ( - '' if (self.current_cpu == 'x86' and hidex86) else - r'\x64' if (self.current_cpu == 'amd64' and x64) else - r'\%s' % self.current_cpu - ) - - def target_dir(self, hidex86=False, x64=False): - r""" - Target platform specific subfolder. - - Parameters - ---------- - hidex86: bool - return '' and not '\x86' if architecture is x86. - x64: bool - return '\x64' and not '\amd64' if architecture is amd64. - - Return - ------ - str - subfolder: '\current', or '' (see hidex86 parameter) - """ - return ( - '' if (self.target_cpu == 'x86' and hidex86) else - r'\x64' if (self.target_cpu == 'amd64' and x64) else - r'\%s' % self.target_cpu - ) - - def cross_dir(self, forcex86=False): - r""" - Cross platform specific subfolder. - - Parameters - ---------- - forcex86: bool - Use 'x86' as current architecture even if current architecture is - not x86. - - Return - ------ - str - subfolder: '' if target architecture is current architecture, - '\current_target' if not. 
- """ - current = 'x86' if forcex86 else self.current_cpu - return ( - '' if self.target_cpu == current else - self.target_dir().replace('\\', '\\%s_' % current) - ) - - -class RegistryInfo: - """ - Microsoft Visual Studio related registry information. - - Parameters - ---------- - platform_info: PlatformInfo - "PlatformInfo" instance. - """ - HKEYS = (winreg.HKEY_USERS, - winreg.HKEY_CURRENT_USER, - winreg.HKEY_LOCAL_MACHINE, - winreg.HKEY_CLASSES_ROOT) - - def __init__(self, platform_info): - self.pi = platform_info - - @property - def visualstudio(self): - """ - Microsoft Visual Studio root registry key. - - Return - ------ - str - Registry key - """ - return 'VisualStudio' - - @property - def sxs(self): - """ - Microsoft Visual Studio SxS registry key. - - Return - ------ - str - Registry key - """ - return join(self.visualstudio, 'SxS') - - @property - def vc(self): - """ - Microsoft Visual C++ VC7 registry key. - - Return - ------ - str - Registry key - """ - return join(self.sxs, 'VC7') - - @property - def vs(self): - """ - Microsoft Visual Studio VS7 registry key. - - Return - ------ - str - Registry key - """ - return join(self.sxs, 'VS7') - - @property - def vc_for_python(self): - """ - Microsoft Visual C++ for Python registry key. - - Return - ------ - str - Registry key - """ - return r'DevDiv\VCForPython' - - @property - def microsoft_sdk(self): - """ - Microsoft SDK registry key. - - Return - ------ - str - Registry key - """ - return 'Microsoft SDKs' - - @property - def windows_sdk(self): - """ - Microsoft Windows/Platform SDK registry key. - - Return - ------ - str - Registry key - """ - return join(self.microsoft_sdk, 'Windows') - - @property - def netfx_sdk(self): - """ - Microsoft .NET Framework SDK registry key. - - Return - ------ - str - Registry key - """ - return join(self.microsoft_sdk, 'NETFXSDK') - - @property - def windows_kits_roots(self): - """ - Microsoft Windows Kits Roots registry key. - - Return - ------ - str - Registry key - """ - return r'Windows Kits\Installed Roots' - - def microsoft(self, key, x86=False): - """ - Return key in Microsoft software registry. - - Parameters - ---------- - key: str - Registry key path where look. - x86: str - Force x86 software registry. - - Return - ------ - str - Registry key - """ - node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node' - return join('Software', node64, 'Microsoft', key) - - def lookup(self, key, name): - """ - Look for values in registry in Microsoft software registry. - - Parameters - ---------- - key: str - Registry key path where look. - name: str - Value name to find. - - Return - ------ - str - value - """ - key_read = winreg.KEY_READ - openkey = winreg.OpenKey - closekey = winreg.CloseKey - ms = self.microsoft - for hkey in self.HKEYS: - bkey = None - try: - bkey = openkey(hkey, ms(key), 0, key_read) - except (OSError, IOError): - if not self.pi.current_is_x86(): - try: - bkey = openkey(hkey, ms(key, True), 0, key_read) - except (OSError, IOError): - continue - else: - continue - try: - return winreg.QueryValueEx(bkey, name)[0] - except (OSError, IOError): - pass - finally: - if bkey: - closekey(bkey) - - -class SystemInfo: - """ - Microsoft Windows and Visual Studio related system information. - - Parameters - ---------- - registry_info: RegistryInfo - "RegistryInfo" instance. - vc_ver: float - Required Microsoft Visual C++ version. - """ - - # Variables and properties in this class use originals CamelCase variables - # names from Microsoft source files for more easy comparison. 
- WinDir = environ.get('WinDir', '') - ProgramFiles = environ.get('ProgramFiles', '') - ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles) - - def __init__(self, registry_info, vc_ver=None): - self.ri = registry_info - self.pi = self.ri.pi - - self.known_vs_paths = self.find_programdata_vs_vers() - - # Except for VS15+, VC version is aligned with VS version - self.vs_ver = self.vc_ver = ( - vc_ver or self._find_latest_available_vs_ver()) - - def _find_latest_available_vs_ver(self): - """ - Find the latest VC version - - Return - ------ - float - version - """ - reg_vc_vers = self.find_reg_vs_vers() - - if not (reg_vc_vers or self.known_vs_paths): - raise distutils.errors.DistutilsPlatformError( - 'No Microsoft Visual C++ version found') - - vc_vers = set(reg_vc_vers) - vc_vers.update(self.known_vs_paths) - return sorted(vc_vers)[-1] - - def find_reg_vs_vers(self): - """ - Find Microsoft Visual Studio versions available in registry. - - Return - ------ - list of float - Versions - """ - ms = self.ri.microsoft - vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs) - vs_vers = [] - for hkey, key in itertools.product(self.ri.HKEYS, vckeys): - try: - bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ) - except (OSError, IOError): - continue - with bkey: - subkeys, values, _ = winreg.QueryInfoKey(bkey) - for i in range(values): - with contextlib.suppress(ValueError): - ver = float(winreg.EnumValue(bkey, i)[0]) - if ver not in vs_vers: - vs_vers.append(ver) - for i in range(subkeys): - with contextlib.suppress(ValueError): - ver = float(winreg.EnumKey(bkey, i)) - if ver not in vs_vers: - vs_vers.append(ver) - return sorted(vs_vers) - - def find_programdata_vs_vers(self): - r""" - Find Visual studio 2017+ versions from information in - "C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances". - - Return - ------ - dict - float version as key, path as value. - """ - vs_versions = {} - instances_dir = \ - r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances' - - try: - hashed_names = listdir(instances_dir) - - except (OSError, IOError): - # Directory not exists with all Visual Studio versions - return vs_versions - - for name in hashed_names: - try: - # Get VS installation path from "state.json" file - state_path = join(instances_dir, name, 'state.json') - with open(state_path, 'rt', encoding='utf-8') as state_file: - state = json.load(state_file) - vs_path = state['installationPath'] - - # Raises OSError if this VS installation does not contain VC - listdir(join(vs_path, r'VC\Tools\MSVC')) - - # Store version and path - vs_versions[self._as_float_version( - state['installationVersion'])] = vs_path - - except (OSError, IOError, KeyError): - # Skip if "state.json" file is missing or bad format - continue - - return vs_versions - - @staticmethod - def _as_float_version(version): - """ - Return a string version as a simplified float version (major.minor) - - Parameters - ---------- - version: str - Version. - - Return - ------ - float - version - """ - return float('.'.join(version.split('.')[:2])) - - @property - def VSInstallDir(self): - """ - Microsoft Visual Studio directory. - - Return - ------ - str - path - """ - # Default path - default = join(self.ProgramFilesx86, - 'Microsoft Visual Studio %0.1f' % self.vs_ver) - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default - - @property - def VCInstallDir(self): - """ - Microsoft Visual C++ directory. 
- - Return - ------ - str - path - """ - path = self._guess_vc() or self._guess_vc_legacy() - - if not isdir(path): - msg = 'Microsoft Visual C++ directory not found' - raise distutils.errors.DistutilsPlatformError(msg) - - return path - - def _guess_vc(self): - """ - Locate Visual C++ for VS2017+. - - Return - ------ - str - path - """ - if self.vs_ver <= 14.0: - return '' - - try: - # First search in known VS paths - vs_dir = self.known_vs_paths[self.vs_ver] - except KeyError: - # Else, search with path from registry - vs_dir = self.VSInstallDir - - guess_vc = join(vs_dir, r'VC\Tools\MSVC') - - # Subdir with VC exact version as name - try: - # Update the VC version with real one instead of VS version - vc_ver = listdir(guess_vc)[-1] - self.vc_ver = self._as_float_version(vc_ver) - return join(guess_vc, vc_ver) - except (OSError, IOError, IndexError): - return '' - - def _guess_vc_legacy(self): - """ - Locate Visual C++ for versions prior to 2017. - - Return - ------ - str - path - """ - default = join(self.ProgramFilesx86, - r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver) - - # Try to get "VC++ for Python" path from registry as default path - reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver) - python_vc = self.ri.lookup(reg_path, 'installdir') - default_vc = join(python_vc, 'VC') if python_vc else default - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc - - @property - def WindowsSdkVersion(self): - """ - Microsoft Windows SDK versions for specified MSVC++ version. - - Return - ------ - tuple of str - versions - """ - if self.vs_ver <= 9.0: - return '7.0', '6.1', '6.0a' - elif self.vs_ver == 10.0: - return '7.1', '7.0a' - elif self.vs_ver == 11.0: - return '8.0', '8.0a' - elif self.vs_ver == 12.0: - return '8.1', '8.1a' - elif self.vs_ver >= 14.0: - return '10.0', '8.1' - - @property - def WindowsSdkLastVersion(self): - """ - Microsoft Windows SDK last version. - - Return - ------ - str - version - """ - return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib')) - - @property # noqa: C901 - def WindowsSdkDir(self): # noqa: C901 # is too complex (12) # FIXME - """ - Microsoft Windows SDK directory. - - Return - ------ - str - path - """ - sdkdir = '' - for ver in self.WindowsSdkVersion: - # Try to get it from registry - loc = join(self.ri.windows_sdk, 'v%s' % ver) - sdkdir = self.ri.lookup(loc, 'installationfolder') - if sdkdir: - break - if not sdkdir or not isdir(sdkdir): - # Try to get "VC++ for Python" version from registry - path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) - install_base = self.ri.lookup(path, 'installdir') - if install_base: - sdkdir = join(install_base, 'WinSDK') - if not sdkdir or not isdir(sdkdir): - # If fail, use default new path - for ver in self.WindowsSdkVersion: - intver = ver[:ver.rfind('.')] - path = r'Microsoft SDKs\Windows Kits\%s' % intver - d = join(self.ProgramFiles, path) - if isdir(d): - sdkdir = d - if not sdkdir or not isdir(sdkdir): - # If fail, use default old path - for ver in self.WindowsSdkVersion: - path = r'Microsoft SDKs\Windows\v%s' % ver - d = join(self.ProgramFiles, path) - if isdir(d): - sdkdir = d - if not sdkdir: - # If fail, use Platform SDK - sdkdir = join(self.VCInstallDir, 'PlatformSDK') - return sdkdir - - @property - def WindowsSDKExecutablePath(self): - """ - Microsoft Windows SDK executable directory. 
- - Return - ------ - str - path - """ - # Find WinSDK NetFx Tools registry dir name - if self.vs_ver <= 11.0: - netfxver = 35 - arch = '' - else: - netfxver = 40 - hidex86 = True if self.vs_ver <= 12.0 else False - arch = self.pi.current_dir(x64=True, hidex86=hidex86) - fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-')) - - # list all possibles registry paths - regpaths = [] - if self.vs_ver >= 14.0: - for ver in self.NetFxSdkVersion: - regpaths += [join(self.ri.netfx_sdk, ver, fx)] - - for ver in self.WindowsSdkVersion: - regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)] - - # Return installation folder from the more recent path - for path in regpaths: - execpath = self.ri.lookup(path, 'installationfolder') - if execpath: - return execpath - - @property - def FSharpInstallDir(self): - """ - Microsoft Visual F# directory. - - Return - ------ - str - path - """ - path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver) - return self.ri.lookup(path, 'productdir') or '' - - @property - def UniversalCRTSdkDir(self): - """ - Microsoft Universal CRT SDK directory. - - Return - ------ - str - path - """ - # Set Kit Roots versions for specified MSVC++ version - vers = ('10', '81') if self.vs_ver >= 14.0 else () - - # Find path of the more recent Kit - for ver in vers: - sdkdir = self.ri.lookup(self.ri.windows_kits_roots, - 'kitsroot%s' % ver) - if sdkdir: - return sdkdir or '' - - @property - def UniversalCRTSdkLastVersion(self): - """ - Microsoft Universal C Runtime SDK last version. - - Return - ------ - str - version - """ - return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib')) - - @property - def NetFxSdkVersion(self): - """ - Microsoft .NET Framework SDK versions. - - Return - ------ - tuple of str - versions - """ - # Set FxSdk versions for specified VS version - return (('4.7.2', '4.7.1', '4.7', - '4.6.2', '4.6.1', '4.6', - '4.5.2', '4.5.1', '4.5') - if self.vs_ver >= 14.0 else ()) - - @property - def NetFxSdkDir(self): - """ - Microsoft .NET Framework SDK directory. - - Return - ------ - str - path - """ - sdkdir = '' - for ver in self.NetFxSdkVersion: - loc = join(self.ri.netfx_sdk, ver) - sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder') - if sdkdir: - break - return sdkdir - - @property - def FrameworkDir32(self): - """ - Microsoft .NET Framework 32bit directory. - - Return - ------ - str - path - """ - # Default path - guess_fw = join(self.WinDir, r'Microsoft.NET\Framework') - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw - - @property - def FrameworkDir64(self): - """ - Microsoft .NET Framework 64bit directory. - - Return - ------ - str - path - """ - # Default path - guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64') - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw - - @property - def FrameworkVersion32(self): - """ - Microsoft .NET Framework 32bit versions. - - Return - ------ - tuple of str - versions - """ - return self._find_dot_net_versions(32) - - @property - def FrameworkVersion64(self): - """ - Microsoft .NET Framework 64bit versions. - - Return - ------ - tuple of str - versions - """ - return self._find_dot_net_versions(64) - - def _find_dot_net_versions(self, bits): - """ - Find Microsoft .NET Framework versions. - - Parameters - ---------- - bits: int - Platform number of bits: 32 or 64. 
- - Return - ------ - tuple of str - versions - """ - # Find actual .NET version in registry - reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) - dot_net_dir = getattr(self, 'FrameworkDir%d' % bits) - ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or '' - - # Set .NET versions for specified MSVC++ version - if self.vs_ver >= 12.0: - return ver, 'v4.0' - elif self.vs_ver >= 10.0: - return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5' - elif self.vs_ver == 9.0: - return 'v3.5', 'v2.0.50727' - elif self.vs_ver == 8.0: - return 'v3.0', 'v2.0.50727' - - @staticmethod - def _use_last_dir_name(path, prefix=''): - """ - Return name of the last dir in path or '' if no dir found. - - Parameters - ---------- - path: str - Use dirs in this path - prefix: str - Use only dirs starting by this prefix - - Return - ------ - str - name - """ - matching_dirs = ( - dir_name - for dir_name in reversed(listdir(path)) - if isdir(join(path, dir_name)) and - dir_name.startswith(prefix) - ) - return next(matching_dirs, None) or '' - - -class EnvironmentInfo: - """ - Return environment variables for specified Microsoft Visual C++ version - and platform : Lib, Include, Path and libpath. - - This function is compatible with Microsoft Visual C++ 9.0 to 14.X. - - Script created by analysing Microsoft environment configuration files like - "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ... - - Parameters - ---------- - arch: str - Target architecture. - vc_ver: float - Required Microsoft Visual C++ version. If not set, autodetect the last - version. - vc_min_ver: float - Minimum Microsoft Visual C++ version. - """ - - # Variables and properties in this class use originals CamelCase variables - # names from Microsoft source files for more easy comparison. - - def __init__(self, arch, vc_ver=None, vc_min_ver=0): - self.pi = PlatformInfo(arch) - self.ri = RegistryInfo(self.pi) - self.si = SystemInfo(self.ri, vc_ver) - - if self.vc_ver < vc_min_ver: - err = 'No suitable Microsoft Visual C++ version found' - raise distutils.errors.DistutilsPlatformError(err) - - @property - def vs_ver(self): - """ - Microsoft Visual Studio. - - Return - ------ - float - version - """ - return self.si.vs_ver - - @property - def vc_ver(self): - """ - Microsoft Visual C++ version. - - Return - ------ - float - version - """ - return self.si.vc_ver - - @property - def VSTools(self): - """ - Microsoft Visual Studio Tools. - - Return - ------ - list of str - paths - """ - paths = [r'Common7\IDE', r'Common7\Tools'] - - if self.vs_ver >= 14.0: - arch_subdir = self.pi.current_dir(hidex86=True, x64=True) - paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow'] - paths += [r'Team Tools\Performance Tools'] - paths += [r'Team Tools\Performance Tools%s' % arch_subdir] - - return [join(self.si.VSInstallDir, path) for path in paths] - - @property - def VCIncludes(self): - """ - Microsoft Visual C++ & Microsoft Foundation Class Includes. - - Return - ------ - list of str - paths - """ - return [join(self.si.VCInstallDir, 'Include'), - join(self.si.VCInstallDir, r'ATLMFC\Include')] - - @property - def VCLibraries(self): - """ - Microsoft Visual C++ & Microsoft Foundation Class Libraries. 
- - Return - ------ - list of str - paths - """ - if self.vs_ver >= 15.0: - arch_subdir = self.pi.target_dir(x64=True) - else: - arch_subdir = self.pi.target_dir(hidex86=True) - paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir] - - if self.vs_ver >= 14.0: - paths += [r'Lib\store%s' % arch_subdir] - - return [join(self.si.VCInstallDir, path) for path in paths] - - @property - def VCStoreRefs(self): - """ - Microsoft Visual C++ store references Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0: - return [] - return [join(self.si.VCInstallDir, r'Lib\store\references')] - - @property - def VCTools(self): - """ - Microsoft Visual C++ Tools. - - Return - ------ - list of str - paths - """ - si = self.si - tools = [join(si.VCInstallDir, 'VCPackages')] - - forcex86 = True if self.vs_ver <= 10.0 else False - arch_subdir = self.pi.cross_dir(forcex86) - if arch_subdir: - tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)] - - if self.vs_ver == 14.0: - path = 'Bin%s' % self.pi.current_dir(hidex86=True) - tools += [join(si.VCInstallDir, path)] - - elif self.vs_ver >= 15.0: - host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else - r'bin\HostX64%s') - tools += [join( - si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))] - - if self.pi.current_cpu != self.pi.target_cpu: - tools += [join( - si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))] - - else: - tools += [join(si.VCInstallDir, 'Bin')] - - return tools - - @property - def OSLibraries(self): - """ - Microsoft Windows SDK Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver <= 10.0: - arch_subdir = self.pi.target_dir(hidex86=True, x64=True) - return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)] - - else: - arch_subdir = self.pi.target_dir(x64=True) - lib = join(self.si.WindowsSdkDir, 'lib') - libver = self._sdk_subdir - return [join(lib, '%sum%s' % (libver, arch_subdir))] - - @property - def OSIncludes(self): - """ - Microsoft Windows SDK Include. - - Return - ------ - list of str - paths - """ - include = join(self.si.WindowsSdkDir, 'include') - - if self.vs_ver <= 10.0: - return [include, join(include, 'gl')] - - else: - if self.vs_ver >= 14.0: - sdkver = self._sdk_subdir - else: - sdkver = '' - return [join(include, '%sshared' % sdkver), - join(include, '%sum' % sdkver), - join(include, '%swinrt' % sdkver)] - - @property - def OSLibpath(self): - """ - Microsoft Windows SDK Libraries Paths. - - Return - ------ - list of str - paths - """ - ref = join(self.si.WindowsSdkDir, 'References') - libpath = [] - - if self.vs_ver <= 9.0: - libpath += self.OSLibraries - - if self.vs_ver >= 11.0: - libpath += [join(ref, r'CommonConfiguration\Neutral')] - - if self.vs_ver >= 14.0: - libpath += [ - ref, - join(self.si.WindowsSdkDir, 'UnionMetadata'), - join( - ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'), - join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'), - join( - ref, 'Windows.Networking.Connectivity.WwanContract', - '1.0.0.0'), - join( - self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs', - '%0.1f' % self.vs_ver, 'References', 'CommonConfiguration', - 'neutral'), - ] - return libpath - - @property - def SdkTools(self): - """ - Microsoft Windows SDK Tools. - - Return - ------ - list of str - paths - """ - return list(self._sdk_tools()) - - def _sdk_tools(self): - """ - Microsoft Windows SDK Tools paths generator. 
- - Return - ------ - generator of str - paths - """ - if self.vs_ver < 15.0: - bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86' - yield join(self.si.WindowsSdkDir, bin_dir) - - if not self.pi.current_is_x86(): - arch_subdir = self.pi.current_dir(x64=True) - path = 'Bin%s' % arch_subdir - yield join(self.si.WindowsSdkDir, path) - - if self.vs_ver in (10.0, 11.0): - if self.pi.target_is_x86(): - arch_subdir = '' - else: - arch_subdir = self.pi.current_dir(hidex86=True, x64=True) - path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir - yield join(self.si.WindowsSdkDir, path) - - elif self.vs_ver >= 15.0: - path = join(self.si.WindowsSdkDir, 'Bin') - arch_subdir = self.pi.current_dir(x64=True) - sdkver = self.si.WindowsSdkLastVersion - yield join(path, '%s%s' % (sdkver, arch_subdir)) - - if self.si.WindowsSDKExecutablePath: - yield self.si.WindowsSDKExecutablePath - - @property - def _sdk_subdir(self): - """ - Microsoft Windows SDK version subdir. - - Return - ------ - str - subdir - """ - ucrtver = self.si.WindowsSdkLastVersion - return ('%s\\' % ucrtver) if ucrtver else '' - - @property - def SdkSetup(self): - """ - Microsoft Windows SDK Setup. - - Return - ------ - list of str - paths - """ - if self.vs_ver > 9.0: - return [] - - return [join(self.si.WindowsSdkDir, 'Setup')] - - @property - def FxTools(self): - """ - Microsoft .NET Framework Tools. - - Return - ------ - list of str - paths - """ - pi = self.pi - si = self.si - - if self.vs_ver <= 10.0: - include32 = True - include64 = not pi.target_is_x86() and not pi.current_is_x86() - else: - include32 = pi.target_is_x86() or pi.current_is_x86() - include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64' - - tools = [] - if include32: - tools += [join(si.FrameworkDir32, ver) - for ver in si.FrameworkVersion32] - if include64: - tools += [join(si.FrameworkDir64, ver) - for ver in si.FrameworkVersion64] - return tools - - @property - def NetFxSDKLibraries(self): - """ - Microsoft .Net Framework SDK Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0 or not self.si.NetFxSdkDir: - return [] - - arch_subdir = self.pi.target_dir(x64=True) - return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)] - - @property - def NetFxSDKIncludes(self): - """ - Microsoft .Net Framework SDK Includes. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0 or not self.si.NetFxSdkDir: - return [] - - return [join(self.si.NetFxSdkDir, r'include\um')] - - @property - def VsTDb(self): - """ - Microsoft Visual Studio Team System Database. - - Return - ------ - list of str - paths - """ - return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')] - - @property - def MSBuild(self): - """ - Microsoft Build Engine. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 12.0: - return [] - elif self.vs_ver < 15.0: - base_path = self.si.ProgramFilesx86 - arch_subdir = self.pi.current_dir(hidex86=True) - else: - base_path = self.si.VSInstallDir - arch_subdir = '' - - path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir) - build = [join(base_path, path)] - - if self.vs_ver >= 15.0: - # Add Roslyn C# & Visual Basic Compiler - build += [join(base_path, path, 'Roslyn')] - - return build - - @property - def HTMLHelpWorkshop(self): - """ - Microsoft HTML Help Workshop. 
- - Return - ------ - list of str - paths - """ - if self.vs_ver < 11.0: - return [] - - return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')] - - @property - def UCRTLibraries(self): - """ - Microsoft Universal C Runtime SDK Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0: - return [] - - arch_subdir = self.pi.target_dir(x64=True) - lib = join(self.si.UniversalCRTSdkDir, 'lib') - ucrtver = self._ucrt_subdir - return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))] - - @property - def UCRTIncludes(self): - """ - Microsoft Universal C Runtime SDK Include. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0: - return [] - - include = join(self.si.UniversalCRTSdkDir, 'include') - return [join(include, '%sucrt' % self._ucrt_subdir)] - - @property - def _ucrt_subdir(self): - """ - Microsoft Universal C Runtime SDK version subdir. - - Return - ------ - str - subdir - """ - ucrtver = self.si.UniversalCRTSdkLastVersion - return ('%s\\' % ucrtver) if ucrtver else '' - - @property - def FSharp(self): - """ - Microsoft Visual F#. - - Return - ------ - list of str - paths - """ - if 11.0 > self.vs_ver > 12.0: - return [] - - return [self.si.FSharpInstallDir] - - @property - def VCRuntimeRedist(self): - """ - Microsoft Visual C++ runtime redistributable dll. - - Return - ------ - str - path - """ - vcruntime = 'vcruntime%d0.dll' % self.vc_ver - arch_subdir = self.pi.target_dir(x64=True).strip('\\') - - # Installation prefixes candidates - prefixes = [] - tools_path = self.si.VCInstallDir - redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist')) - if isdir(redist_path): - # Redist version may not be exactly the same as tools - redist_path = join(redist_path, listdir(redist_path)[-1]) - prefixes += [redist_path, join(redist_path, 'onecore')] - - prefixes += [join(tools_path, 'redist')] # VS14 legacy path - - # CRT directory - crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10), - # Sometime store in directory with VS version instead of VC - 'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10)) - - # vcruntime path - for prefix, crt_dir in itertools.product(prefixes, crt_dirs): - path = join(prefix, arch_subdir, crt_dir, vcruntime) - if isfile(path): - return path - - def return_env(self, exists=True): - """ - Return environment dict. - - Parameters - ---------- - exists: bool - It True, only return existing paths. - - Return - ------ - dict - environment - """ - env = dict( - include=self._build_paths('include', - [self.VCIncludes, - self.OSIncludes, - self.UCRTIncludes, - self.NetFxSDKIncludes], - exists), - lib=self._build_paths('lib', - [self.VCLibraries, - self.OSLibraries, - self.FxTools, - self.UCRTLibraries, - self.NetFxSDKLibraries], - exists), - libpath=self._build_paths('libpath', - [self.VCLibraries, - self.FxTools, - self.VCStoreRefs, - self.OSLibpath], - exists), - path=self._build_paths('path', - [self.VCTools, - self.VSTools, - self.VsTDb, - self.SdkTools, - self.SdkSetup, - self.FxTools, - self.MSBuild, - self.HTMLHelpWorkshop, - self.FSharp], - exists), - ) - if self.vs_ver >= 14 and isfile(self.VCRuntimeRedist): - env['py_vcruntime_redist'] = self.VCRuntimeRedist - return env - - def _build_paths(self, name, spec_path_lists, exists): - """ - Given an environment variable name and specified paths, - return a pathsep-separated string of paths containing - unique, extant, directories from those paths and from - the environment variable. Raise an error if no paths - are resolved. 
- - Parameters - ---------- - name: str - Environment variable name - spec_path_lists: list of str - Paths - exists: bool - It True, only return existing paths. - - Return - ------ - str - Pathsep-separated paths - """ - # flatten spec_path_lists - spec_paths = itertools.chain.from_iterable(spec_path_lists) - env_paths = environ.get(name, '').split(pathsep) - paths = itertools.chain(spec_paths, env_paths) - extant_paths = list(filter(isdir, paths)) if exists else paths - if not extant_paths: - msg = "%s environment variable is empty" % name.upper() - raise distutils.errors.DistutilsPlatformError(msg) - unique_paths = unique_everseen(extant_paths) - return pathsep.join(unique_paths) diff --git a/spaces/TechShark20/handwespeak/spoter/utils.py b/spaces/TechShark20/handwespeak/spoter/utils.py deleted file mode 100644 index af402265c74fc092a1a03ef6e90b5c9ad3f1934b..0000000000000000000000000000000000000000 --- a/spaces/TechShark20/handwespeak/spoter/utils.py +++ /dev/null @@ -1,81 +0,0 @@ - -import logging -import torch - - -def train_epoch(model, dataloader, criterion, optimizer, device, scheduler=None): - - pred_correct, pred_all = 0, 0 - running_loss = 0.0 - - for i, data in enumerate(dataloader): - inputs, labels = data - inputs = inputs.squeeze(0).to(device) - labels = labels.to(device, dtype=torch.long) - - optimizer.zero_grad() - outputs = model(inputs).expand(1, -1, -1) - - loss = criterion(outputs[0], labels[0]) - loss.backward() - optimizer.step() - running_loss += loss - - # Statistics - if int(torch.argmax(torch.nn.functional.softmax(outputs, dim=2))) == int(labels[0][0]): - pred_correct += 1 - pred_all += 1 - - if scheduler: - scheduler.step(running_loss.item() / len(dataloader)) - - return running_loss, pred_correct, pred_all, (pred_correct / pred_all) - - -def evaluate(model, dataloader, device, print_stats=False): - - pred_correct, pred_all = 0, 0 - stats = {i: [0, 0] for i in range(101)} - - for i, data in enumerate(dataloader): - inputs, labels = data - inputs = inputs.squeeze(0).to(device) - labels = labels.to(device, dtype=torch.long) - - outputs = model(inputs).expand(1, -1, -1) - - # Statistics - if int(torch.argmax(torch.nn.functional.softmax(outputs, dim=2))) == int(labels[0][0]): - stats[int(labels[0][0])][0] += 1 - pred_correct += 1 - - stats[int(labels[0][0])][1] += 1 - pred_all += 1 - - if print_stats: - stats = {key: value[0] / value[1] for key, value in stats.items() if value[1] != 0} - print("Label accuracies statistics:") - print(str(stats) + "\n") - logging.info("Label accuracies statistics:") - logging.info(str(stats) + "\n") - - return pred_correct, pred_all, (pred_correct / pred_all) - - -def evaluate_top_k(model, dataloader, device, k=5): - - pred_correct, pred_all = 0, 0 - - for i, data in enumerate(dataloader): - inputs, labels = data - inputs = inputs.squeeze(0).to(device) - labels = labels.to(device, dtype=torch.long) - - outputs = model(inputs).expand(1, -1, -1) - - if int(labels[0][0]) in torch.topk(outputs, k).indices.tolist(): - pred_correct += 1 - - pred_all += 1 - - return pred_correct, pred_all, (pred_correct / pred_all) diff --git a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/__init__.py b/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/__init__.py 
b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/__init__.py deleted file mode 100644 index 3d015c530b3e33de8ea60943a0a98b135f013dd7..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm, CycleBatchNormList -from .deform_conv import DeformConv, ModulatedDeformConv -from .mask_ops import paste_masks_in_image -from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated -from .roi_align import ROIAlign, roi_align -from .roi_align_rotated import ROIAlignRotated, roi_align_rotated -from .shape_spec import ShapeSpec -from .wrappers import ( - BatchNorm2d, - Conv2d, - ConvTranspose2d, - cat, - interpolate, - Linear, - nonzero_tuple, - cross_entropy, - shapes_to_tensor, -) -from .blocks import CNNBlockBase, DepthwiseSeparableConv2d -from .aspp import ASPP -from .losses import ciou_loss, diou_loss - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/TheStinger/Ilaria_RVC/app.py b/spaces/TheStinger/Ilaria_RVC/app.py deleted file mode 100644 index 4184e9e92e99e624b9d021ee6223add03621c6ac..0000000000000000000000000000000000000000 --- a/spaces/TheStinger/Ilaria_RVC/app.py +++ /dev/null @@ -1,2088 +0,0 @@ -import subprocess, torch, os, traceback, sys, warnings, shutil, numpy as np -from mega import Mega -os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" -import threading -from time import sleep -from subprocess import Popen -import faiss -from random import shuffle -import json, datetime, requests -from gtts import gTTS -now_dir = os.getcwd() -sys.path.append(now_dir) -tmp = os.path.join(now_dir, "TEMP") -shutil.rmtree(tmp, ignore_errors=True) -shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True) -os.makedirs(tmp, exist_ok=True) -os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True) -os.makedirs(os.path.join(now_dir, "weights"), exist_ok=True) -os.environ["TEMP"] = tmp -warnings.filterwarnings("ignore") -torch.manual_seed(114514) -from i18n import I18nAuto - -import signal - -import math - -from utils import load_audio, CSVutil - -global DoFormant, Quefrency, Timbre - -if not os.path.isdir('csvdb/'): - os.makedirs('csvdb') - frmnt, stp = open("csvdb/formanting.csv", 'w'), open("csvdb/stop.csv", 'w') - frmnt.close() - stp.close() - -try: - DoFormant, Quefrency, Timbre = CSVutil('csvdb/formanting.csv', 'r', 'formanting') - DoFormant = ( - lambda DoFormant: True if DoFormant.lower() == 'true' else (False if DoFormant.lower() == 'false' else DoFormant) - )(DoFormant) -except (ValueError, TypeError, IndexError): - DoFormant, Quefrency, Timbre = False, 1.0, 1.0 - CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, Quefrency, Timbre) - -def download_models(): - # Download hubert base model if not present - if not os.path.isfile('./hubert_base.pt'): - response = requests.get('https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt') - - if response.status_code == 200: - with open('./hubert_base.pt', 'wb') as f: - f.write(response.content) - print("Downloaded hubert base model file successfully. File saved to ./hubert_base.pt.") - else: - raise Exception("Failed to download hubert base model file. 
Status code: " + str(response.status_code) + ".") - - # Download rmvpe model if not present - if not os.path.isfile('./rmvpe.pt'): - response = requests.get('https://drive.usercontent.google.com/download?id=1Hkn4kNuVFRCNQwyxQFRtmzmMBGpQxptI&export=download&authuser=0&confirm=t&uuid=0b3a40de-465b-4c65-8c41-135b0b45c3f7&at=APZUnTV3lA3LnyTbeuduura6Dmi2:1693724254058') - - if response.status_code == 200: - with open('./rmvpe.pt', 'wb') as f: - f.write(response.content) - print("Downloaded rmvpe model file successfully. File saved to ./rmvpe.pt.") - else: - raise Exception("Failed to download rmvpe model file. Status code: " + str(response.status_code) + ".") - -download_models() - -print("\n-------------------------------\nRVC v2 Easy GUI (Local Edition)\n-------------------------------\n") - -def formant_apply(qfrency, tmbre): - Quefrency = qfrency - Timbre = tmbre - DoFormant = True - CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, qfrency, tmbre) - - return ({"value": Quefrency, "__type__": "update"}, {"value": Timbre, "__type__": "update"}) - -def get_fshift_presets(): - fshift_presets_list = [] - for dirpath, _, filenames in os.walk("./formantshiftcfg/"): - for filename in filenames: - if filename.endswith(".txt"): - fshift_presets_list.append(os.path.join(dirpath,filename).replace('\\','/')) - - if len(fshift_presets_list) > 0: - return fshift_presets_list - else: - return '' - - - -def formant_enabled(cbox, qfrency, tmbre, frmntapply, formantpreset, formant_refresh_button): - - if (cbox): - - DoFormant = True - CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, qfrency, tmbre) - #print(f"is checked? - {cbox}\ngot {DoFormant}") - - return ( - {"value": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - ) - - - else: - - DoFormant = False - CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, qfrency, tmbre) - - #print(f"is checked? 
- {cbox}\ngot {DoFormant}") - return ( - {"value": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - ) - - - -def preset_apply(preset, qfer, tmbr): - if str(preset) != '': - with open(str(preset), 'r') as p: - content = p.readlines() - qfer, tmbr = content[0].split('\n')[0], content[1] - - formant_apply(qfer, tmbr) - else: - pass - return ({"value": qfer, "__type__": "update"}, {"value": tmbr, "__type__": "update"}) - -def update_fshift_presets(preset, qfrency, tmbre): - - qfrency, tmbre = preset_apply(preset, qfrency, tmbre) - - if (str(preset) != ''): - with open(str(preset), 'r') as p: - content = p.readlines() - qfrency, tmbre = content[0].split('\n')[0], content[1] - - formant_apply(qfrency, tmbre) - else: - pass - return ( - {"choices": get_fshift_presets(), "__type__": "update"}, - {"value": qfrency, "__type__": "update"}, - {"value": tmbre, "__type__": "update"}, - ) - -i18n = I18nAuto() -#i18n.print() -# 判断是否有能用来训练和加速推理的N卡 -ngpu = torch.cuda.device_count() -gpu_infos = [] -mem = [] -if (not torch.cuda.is_available()) or ngpu == 0: - if_gpu_ok = False -else: - if_gpu_ok = False - for i in range(ngpu): - gpu_name = torch.cuda.get_device_name(i) - if ( - "10" in gpu_name - or "16" in gpu_name - or "20" in gpu_name - or "30" in gpu_name - or "40" in gpu_name - or "A2" in gpu_name.upper() - or "A3" in gpu_name.upper() - or "A4" in gpu_name.upper() - or "P4" in gpu_name.upper() - or "A50" in gpu_name.upper() - or "A60" in gpu_name.upper() - or "70" in gpu_name - or "80" in gpu_name - or "90" in gpu_name - or "M4" in gpu_name.upper() - or "T4" in gpu_name.upper() - or "TITAN" in gpu_name.upper() - ): # A10#A100#V100#A40#P40#M40#K80#A4500 - if_gpu_ok = True # 至少有一张能用的N卡 - gpu_infos.append("%s\t%s" % (i, gpu_name)) - mem.append( - int( - torch.cuda.get_device_properties(i).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - ) -if if_gpu_ok == True and len(gpu_infos) > 0: - gpu_info = "\n".join(gpu_infos) - default_batch_size = min(mem) // 2 -else: - gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练") - default_batch_size = 1 -gpus = "-".join([i[0] for i in gpu_infos]) -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -import soundfile as sf -from fairseq import checkpoint_utils -import gradio as gr -import logging -from vc_infer_pipeline import VC -from config import Config - -config = Config() -# from trainset_preprocess_pipeline import PreProcess -logging.getLogger("numba").setLevel(logging.WARNING) - -hubert_model = None - -def load_hubert(): - global hubert_model - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - - -weight_root = "weights" -index_root = "logs" -names = [] -for name in os.listdir(weight_root): - if name.endswith(".pth"): - names.append(name) -index_paths = [] -for root, dirs, files in os.walk(index_root, topdown=False): - for name in files: - if name.endswith(".index") and "trained" not in name: - index_paths.append("%s/%s" % (root, name)) - - - -def vc_single( - sid, - 
input_audio_path, - f0_up_key, - f0_file, - f0_method, - file_index, - #file_index2, - # file_big_npy, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - crepe_hop_length, -): # spk_item, input_audio0, vc_transform0,f0_file,f0method0 - global tgt_sr, net_g, vc, hubert_model, version - if input_audio_path is None: - return "You need to upload an audio", None - f0_up_key = int(f0_up_key) - try: - audio = load_audio(input_audio_path, 16000, DoFormant, Quefrency, Timbre) - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - times = [0, 0, 0] - if hubert_model == None: - load_hubert() - if_f0 = cpt.get("f0", 1) - file_index = ( - ( - file_index.strip(" ") - .strip('"') - .strip("\n") - .strip('"') - .strip(" ") - .replace("trained", "added") - ) - ) # 防止小白写错,自动帮他替换掉 - # file_big_npy = ( - # file_big_npy.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - # ) - audio_opt = vc.pipeline( - hubert_model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - crepe_hop_length, - f0_file=f0_file, - ) - if resample_sr >= 16000 and tgt_sr != resample_sr: - tgt_sr = resample_sr - index_info = ( - "Using index:%s." % file_index - if os.path.exists(file_index) - else "Index not used." - ) - return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss" % ( - index_info, - times[0], - times[1], - times[2], - ), (tgt_sr, audio_opt) - except: - info = traceback.format_exc() - print(info) - return info, (None, None) - - -def vc_multi( - sid, - dir_path, - opt_root, - paths, - f0_up_key, - f0_method, - file_index, - file_index2, - # file_big_npy, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - format1, - crepe_hop_length, -): - try: - dir_path = ( - dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) # 防止小白拷路径头尾带了空格和"和回车 - opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - os.makedirs(opt_root, exist_ok=True) - try: - if dir_path != "": - paths = [os.path.join(dir_path, name) for name in os.listdir(dir_path)] - else: - paths = [path.name for path in paths] - except: - traceback.print_exc() - paths = [path.name for path in paths] - infos = [] - for path in paths: - info, opt = vc_single( - sid, - path, - f0_up_key, - None, - f0_method, - file_index, - # file_big_npy, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - crepe_hop_length - ) - if "Success" in info: - try: - tgt_sr, audio_opt = opt - if format1 in ["wav", "flac"]: - sf.write( - "%s/%s.%s" % (opt_root, os.path.basename(path), format1), - audio_opt, - tgt_sr, - ) - else: - path = "%s/%s.wav" % (opt_root, os.path.basename(path)) - sf.write( - path, - audio_opt, - tgt_sr, - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format1) - ) - except: - info += traceback.format_exc() - infos.append("%s->%s" % (os.path.basename(path), info)) - yield "\n".join(infos) - yield "\n".join(infos) - except: - yield traceback.format_exc() - -# 一个选项卡全局只能有一个音色 -def get_vc(sid): - global n_spk, tgt_sr, net_g, vc, cpt, version - if sid == "" or sid == []: - global hubert_model - if hubert_model != None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 - print("clean_empty_cache") - del net_g, n_spk, vc, hubert_model, tgt_sr # ,cpt - hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None - if 
torch.cuda.is_available(): - torch.cuda.empty_cache() - ###楼下不这么折腾清理不干净 - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del net_g, cpt - if torch.cuda.is_available(): - torch.cuda.empty_cache() - cpt = None - return {"visible": False, "__type__": "update"} - person = "%s/%s" % (weight_root, sid) - print("loading %s" % person) - cpt = torch.load(person, map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) - net_g.eval().to(config.device) - if config.is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - n_spk = cpt["config"][-3] - return {"visible": False, "maximum": n_spk, "__type__": "update"} - - -def change_choices(): - names = [] - for name in os.listdir(weight_root): - if name.endswith(".pth"): - names.append(name) - index_paths = [] - for root, dirs, files in os.walk(index_root, topdown=False): - for name in files: - if name.endswith(".index") and "trained" not in name: - index_paths.append("%s/%s" % (root, name)) - return {"choices": sorted(names), "__type__": "update"}, { - "choices": sorted(index_paths), - "__type__": "update", - } - - -def clean(): - return {"value": "", "__type__": "update"} - - -sr_dict = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -def if_done(done, p): - while 1: - if p.poll() == None: - sleep(0.5) - else: - break - done[0] = True - - -def if_done_multi(done, ps): - while 1: - # poll==None代表进程未结束 - # 只要有一个进程未结束都不停 - flag = 1 - for p in ps: - if p.poll() == None: - flag = 0 - sleep(0.5) - break - if flag == 1: - break - done[0] = True - - -def preprocess_dataset(trainset_dir, exp_dir, sr, n_p): - sr = sr_dict[sr] - os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True) - f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w") - f.close() - cmd = ( - config.python_cmd - + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s " - % (trainset_dir, sr, n_p, now_dir, exp_dir) - + str(config.noparallel) - ) - print(cmd) - p = Popen(cmd, shell=True) # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done, - args=( - done, - p, - ), - ).start() - while 1: - with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f: - yield (f.read()) - sleep(1) - if done[0] == True: - break - with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f: - log = f.read() - print(log) - yield log - -# but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2]) -def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, 
version19, echl): - gpus = gpus.split("-") - os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True) - f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w") - f.close() - if if_f0: - cmd = config.python_cmd + " extract_f0_print.py %s/logs/%s %s %s %s" % ( - now_dir, - exp_dir, - n_p, - f0method, - echl, - ) - print(cmd) - p = Popen(cmd, shell=True, cwd=now_dir) # , stdin=PIPE, stdout=PIPE,stderr=PIPE - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done, - args=( - done, - p, - ), - ).start() - while 1: - with open( - "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r" - ) as f: - yield (f.read()) - sleep(1) - if done[0] == True: - break - with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f: - log = f.read() - print(log) - yield log - ####对不同part分别开多进程 - """ - n_part=int(sys.argv[1]) - i_part=int(sys.argv[2]) - i_gpu=sys.argv[3] - exp_dir=sys.argv[4] - os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu) - """ - leng = len(gpus) - ps = [] - for idx, n_g in enumerate(gpus): - cmd = ( - config.python_cmd - + " extract_feature_print.py %s %s %s %s %s/logs/%s %s" - % ( - config.device, - leng, - idx, - n_g, - now_dir, - exp_dir, - version19, - ) - ) - print(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir - ps.append(p) - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done_multi, - args=( - done, - ps, - ), - ).start() - while 1: - with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f: - yield (f.read()) - sleep(1) - if done[0] == True: - break - with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f: - log = f.read() - print(log) - yield log - - -def change_sr2(sr2, if_f0_3, version19): - path_str = "" if version19 == "v1" else "_v2" - f0_str = "f0" if if_f0_3 else "" - if_pretrained_generator_exist = os.access("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK) - if_pretrained_discriminator_exist = os.access("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK) - if (if_pretrained_generator_exist == False): - print("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model") - if (if_pretrained_discriminator_exist == False): - print("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model") - return ( - ("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_generator_exist else "", - ("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_discriminator_exist else "", - {"visible": True, "__type__": "update"} - ) - -def change_version19(sr2, if_f0_3, version19): - path_str = "" if version19 == "v1" else "_v2" - f0_str = "f0" if if_f0_3 else "" - if_pretrained_generator_exist = os.access("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK) - if_pretrained_discriminator_exist = os.access("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK) - if (if_pretrained_generator_exist == False): - print("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model") - if (if_pretrained_discriminator_exist == False): - print("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model") - return ( - ("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_generator_exist else "", - 
("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_discriminator_exist else "", - ) - - -def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15 - path_str = "" if version19 == "v1" else "_v2" - if_pretrained_generator_exist = os.access("pretrained%s/f0G%s.pth" % (path_str, sr2), os.F_OK) - if_pretrained_discriminator_exist = os.access("pretrained%s/f0D%s.pth" % (path_str, sr2), os.F_OK) - if (if_pretrained_generator_exist == False): - print("pretrained%s/f0G%s.pth" % (path_str, sr2), "not exist, will not use pretrained model") - if (if_pretrained_discriminator_exist == False): - print("pretrained%s/f0D%s.pth" % (path_str, sr2), "not exist, will not use pretrained model") - if if_f0_3: - return ( - {"visible": True, "__type__": "update"}, - "pretrained%s/f0G%s.pth" % (path_str, sr2) if if_pretrained_generator_exist else "", - "pretrained%s/f0D%s.pth" % (path_str, sr2) if if_pretrained_discriminator_exist else "", - ) - return ( - {"visible": False, "__type__": "update"}, - ("pretrained%s/G%s.pth" % (path_str, sr2)) if if_pretrained_generator_exist else "", - ("pretrained%s/D%s.pth" % (path_str, sr2)) if if_pretrained_discriminator_exist else "", - ) - - -global log_interval - - -def set_log_interval(exp_dir, batch_size12): - log_interval = 1 - - folder_path = os.path.join(exp_dir, "1_16k_wavs") - - if os.path.exists(folder_path) and os.path.isdir(folder_path): - wav_files = [f for f in os.listdir(folder_path) if f.endswith(".wav")] - if wav_files: - sample_size = len(wav_files) - log_interval = math.ceil(sample_size / batch_size12) - if log_interval > 1: - log_interval += 1 - return log_interval - -# but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16]) -def click_train( - exp_dir1, - sr2, - if_f0_3, - spk_id5, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, -): - CSVutil('csvdb/stop.csv', 'w+', 'formanting', False) - # 生成filelist - exp_dir = "%s/logs/%s" % (now_dir, exp_dir1) - os.makedirs(exp_dir, exist_ok=True) - gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir) - feature_dir = ( - "%s/3_feature256" % (exp_dir) - if version19 == "v1" - else "%s/3_feature768" % (exp_dir) - ) - - log_interval = set_log_interval(exp_dir, batch_size12) - - if if_f0_3: - f0_dir = "%s/2a_f0" % (exp_dir) - f0nsf_dir = "%s/2b-f0nsf" % (exp_dir) - names = ( - set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) - & set([name.split(".")[0] for name in os.listdir(feature_dir)]) - & set([name.split(".")[0] for name in os.listdir(f0_dir)]) - & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)]) - ) - else: - names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set( - [name.split(".")[0] for name in os.listdir(feature_dir)] - ) - opt = [] - for name in names: - if if_f0_3: - opt.append( - "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s" - % ( - gt_wavs_dir.replace("\\", "\\\\"), - name, - feature_dir.replace("\\", "\\\\"), - name, - f0_dir.replace("\\", "\\\\"), - name, - f0nsf_dir.replace("\\", "\\\\"), - name, - spk_id5, - ) - ) - else: - opt.append( - "%s/%s.wav|%s/%s.npy|%s" - % ( - gt_wavs_dir.replace("\\", "\\\\"), - name, - feature_dir.replace("\\", "\\\\"), - name, - spk_id5, - ) - ) - fea_dim = 256 if version19 == "v1" else 768 - if if_f0_3: - for _ in range(2): - opt.append( - 
"%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s" - % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5) - ) - else: - for _ in range(2): - opt.append( - "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s" - % (now_dir, sr2, now_dir, fea_dim, spk_id5) - ) - shuffle(opt) - with open("%s/filelist.txt" % exp_dir, "w") as f: - f.write("\n".join(opt)) - print("write filelist done") - # 生成config#无需生成config - # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0" - print("use gpus:", gpus16) - if pretrained_G14 == "": - print("no pretrained Generator") - if pretrained_D15 == "": - print("no pretrained Discriminator") - if gpus16: - cmd = ( - config.python_cmd - + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s -li %s" - % ( - exp_dir1, - sr2, - 1 if if_f0_3 else 0, - batch_size12, - gpus16, - total_epoch11, - save_epoch10, - ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "", - ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "", - 1 if if_save_latest13 == True else 0, - 1 if if_cache_gpu17 == True else 0, - 1 if if_save_every_weights18 == True else 0, - version19, - log_interval, - ) - ) - else: - cmd = ( - config.python_cmd - + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s -li %s" - % ( - exp_dir1, - sr2, - 1 if if_f0_3 else 0, - batch_size12, - total_epoch11, - save_epoch10, - ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "\b", - ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "\b", - 1 if if_save_latest13 == True else 0, - 1 if if_cache_gpu17 == True else 0, - 1 if if_save_every_weights18 == True else 0, - version19, - log_interval, - ) - ) - print(cmd) - p = Popen(cmd, shell=True, cwd=now_dir) - global PID - PID = p.pid - p.wait() - return ("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", {"visible": False, "__type__": "update"}, {"visible": True, "__type__": "update"}) - - -# but4.click(train_index, [exp_dir1], info3) -def train_index(exp_dir1, version19): - exp_dir = "%s/logs/%s" % (now_dir, exp_dir1) - os.makedirs(exp_dir, exist_ok=True) - feature_dir = ( - "%s/3_feature256" % (exp_dir) - if version19 == "v1" - else "%s/3_feature768" % (exp_dir) - ) - if os.path.exists(feature_dir) == False: - return "请先进行特征提取!" - listdir_res = list(os.listdir(feature_dir)) - if len(listdir_res) == 0: - return "请先进行特征提取!" 
- npys = [] - for name in sorted(listdir_res): - phone = np.load("%s/%s" % (feature_dir, name)) - npys.append(phone) - big_npy = np.concatenate(npys, 0) - big_npy_idx = np.arange(big_npy.shape[0]) - np.random.shuffle(big_npy_idx) - big_npy = big_npy[big_npy_idx] - np.save("%s/total_fea.npy" % exp_dir, big_npy) - # n_ivf = big_npy.shape[0] // 39 - n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39) - infos = [] - infos.append("%s,%s" % (big_npy.shape, n_ivf)) - yield "\n".join(infos) - index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf) - # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf) - infos.append("training") - yield "\n".join(infos) - index_ivf = faiss.extract_index_ivf(index) # - index_ivf.nprobe = 1 - index.train(big_npy) - faiss.write_index( - index, - "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), - ) - # faiss.write_index(index, '%s/trained_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19)) - infos.append("adding") - yield "\n".join(infos) - batch_size_add = 8192 - for i in range(0, big_npy.shape[0], batch_size_add): - index.add(big_npy[i : i + batch_size_add]) - faiss.write_index( - index, - "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), - ) - infos.append( - "成功构建索引,added_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (n_ivf, index_ivf.nprobe, exp_dir1, version19) - ) - # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19)) - # infos.append("成功构建索引,added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19)) - yield "\n".join(infos) - - -# but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3) -def train1key( - exp_dir1, - sr2, - if_f0_3, - trainset_dir4, - spk_id5, - np7, - f0method8, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, - echl -): - infos = [] - - def get_info_str(strr): - infos.append(strr) - return "\n".join(infos) - - model_log_dir = "%s/logs/%s" % (now_dir, exp_dir1) - preprocess_log_path = "%s/preprocess.log" % model_log_dir - extract_f0_feature_log_path = "%s/extract_f0_feature.log" % model_log_dir - gt_wavs_dir = "%s/0_gt_wavs" % model_log_dir - feature_dir = ( - "%s/3_feature256" % model_log_dir - if version19 == "v1" - else "%s/3_feature768" % model_log_dir - ) - - os.makedirs(model_log_dir, exist_ok=True) - #########step1:处理数据 - open(preprocess_log_path, "w").close() - cmd = ( - config.python_cmd - + " trainset_preprocess_pipeline_print.py %s %s %s %s " - % (trainset_dir4, sr_dict[sr2], np7, model_log_dir) - + str(config.noparallel) - ) - yield get_info_str(i18n("step1:正在处理数据")) - yield get_info_str(cmd) - p = Popen(cmd, shell=True) - p.wait() - with open(preprocess_log_path, "r") as f: - print(f.read()) - #########step2a:提取音高 - open(extract_f0_feature_log_path, "w") - if if_f0_3: - yield get_info_str("step2a:正在提取音高") - cmd = config.python_cmd + " extract_f0_print.py %s %s %s %s" % ( - model_log_dir, - np7, - f0method8, - echl - ) - yield get_info_str(cmd) - p = Popen(cmd, shell=True, cwd=now_dir) - p.wait() - with open(extract_f0_feature_log_path, "r") as f: - print(f.read()) - else: - yield get_info_str(i18n("step2a:无需提取音高")) - 
#######step2b:提取特征 - yield get_info_str(i18n("step2b:正在提取特征")) - gpus = gpus16.split("-") - leng = len(gpus) - ps = [] - for idx, n_g in enumerate(gpus): - cmd = config.python_cmd + " extract_feature_print.py %s %s %s %s %s %s" % ( - config.device, - leng, - idx, - n_g, - model_log_dir, - version19, - ) - yield get_info_str(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir - ps.append(p) - for p in ps: - p.wait() - with open(extract_f0_feature_log_path, "r") as f: - print(f.read()) - #######step3a:训练模型 - yield get_info_str(i18n("step3a:正在训练模型")) - # 生成filelist - if if_f0_3: - f0_dir = "%s/2a_f0" % model_log_dir - f0nsf_dir = "%s/2b-f0nsf" % model_log_dir - names = ( - set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) - & set([name.split(".")[0] for name in os.listdir(feature_dir)]) - & set([name.split(".")[0] for name in os.listdir(f0_dir)]) - & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)]) - ) - else: - names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set( - [name.split(".")[0] for name in os.listdir(feature_dir)] - ) - opt = [] - for name in names: - if if_f0_3: - opt.append( - "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s" - % ( - gt_wavs_dir.replace("\\", "\\\\"), - name, - feature_dir.replace("\\", "\\\\"), - name, - f0_dir.replace("\\", "\\\\"), - name, - f0nsf_dir.replace("\\", "\\\\"), - name, - spk_id5, - ) - ) - else: - opt.append( - "%s/%s.wav|%s/%s.npy|%s" - % ( - gt_wavs_dir.replace("\\", "\\\\"), - name, - feature_dir.replace("\\", "\\\\"), - name, - spk_id5, - ) - ) - fea_dim = 256 if version19 == "v1" else 768 - if if_f0_3: - for _ in range(2): - opt.append( - "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s" - % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5) - ) - else: - for _ in range(2): - opt.append( - "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s" - % (now_dir, sr2, now_dir, fea_dim, spk_id5) - ) - shuffle(opt) - with open("%s/filelist.txt" % model_log_dir, "w") as f: - f.write("\n".join(opt)) - yield get_info_str("write filelist done") - if gpus16: - cmd = ( - config.python_cmd - +" train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s" - % ( - exp_dir1, - sr2, - 1 if if_f0_3 else 0, - batch_size12, - gpus16, - total_epoch11, - save_epoch10, - ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "", - ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "", - 1 if if_save_latest13 == True else 0, - 1 if if_cache_gpu17 == True else 0, - 1 if if_save_every_weights18 == True else 0, - version19, - ) - ) - else: - cmd = ( - config.python_cmd - + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s" - % ( - exp_dir1, - sr2, - 1 if if_f0_3 else 0, - batch_size12, - total_epoch11, - save_epoch10, - ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "", - ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "", - 1 if if_save_latest13 == True else 0, - 1 if if_cache_gpu17 == True else 0, - 1 if if_save_every_weights18 == True else 0, - version19, - ) - ) - yield get_info_str(cmd) - p = Popen(cmd, shell=True, cwd=now_dir) - p.wait() - yield get_info_str(i18n("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log")) - #######step3b:训练索引 - npys = [] - listdir_res = list(os.listdir(feature_dir)) - for name 
in sorted(listdir_res): - phone = np.load("%s/%s" % (feature_dir, name)) - npys.append(phone) - big_npy = np.concatenate(npys, 0) - - big_npy_idx = np.arange(big_npy.shape[0]) - np.random.shuffle(big_npy_idx) - big_npy = big_npy[big_npy_idx] - np.save("%s/total_fea.npy" % model_log_dir, big_npy) - - # n_ivf = big_npy.shape[0] // 39 - n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39) - yield get_info_str("%s,%s" % (big_npy.shape, n_ivf)) - index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf) - yield get_info_str("training index") - index_ivf = faiss.extract_index_ivf(index) # - index_ivf.nprobe = 1 - index.train(big_npy) - faiss.write_index( - index, - "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (model_log_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), - ) - yield get_info_str("adding index") - batch_size_add = 8192 - for i in range(0, big_npy.shape[0], batch_size_add): - index.add(big_npy[i : i + batch_size_add]) - faiss.write_index( - index, - "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (model_log_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), - ) - yield get_info_str( - "成功构建索引, added_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (n_ivf, index_ivf.nprobe, exp_dir1, version19) - ) - yield get_info_str(i18n("全流程结束!")) - - -def whethercrepeornah(radio): - mango = True if radio == 'mangio-crepe' or radio == 'mangio-crepe-tiny' else False - return ({"visible": mango, "__type__": "update"}) - -# ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__]) -def change_info_(ckpt_path): - if ( - os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log")) - == False - ): - return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} - try: - with open( - ckpt_path.replace(os.path.basename(ckpt_path), "train.log"), "r" - ) as f: - info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1]) - sr, f0 = info["sample_rate"], info["if_f0"] - version = "v2" if ("version" in info and info["version"] == "v2") else "v1" - return sr, str(f0), version - except: - traceback.print_exc() - return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} - - -from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM - - -def export_onnx(ModelPath, ExportedPath, MoeVS=True): - cpt = torch.load(ModelPath, map_location="cpu") - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - hidden_channels = 256 if cpt.get("version","v1")=="v1"else 768#cpt["config"][-2] # hidden_channels,为768Vec做准备 - - test_phone = torch.rand(1, 200, hidden_channels) # hidden unit - test_phone_lengths = torch.tensor([200]).long() # hidden unit 长度(貌似没啥用) - test_pitch = torch.randint(size=(1, 200), low=5, high=255) # 基频(单位赫兹) - test_pitchf = torch.rand(1, 200) # nsf基频 - test_ds = torch.LongTensor([0]) # 说话人ID - test_rnd = torch.rand(1, 192, 200) # 噪声(加入随机因子) - - device = "cpu" # 导出时设备(不影响使用模型) - - - net_g = SynthesizerTrnMsNSFsidM( - *cpt["config"], is_half=False,version=cpt.get("version","v1") - ) # fp32导出(C++要支持fp16必须手动将内存重新排列所以暂时不用fp16) - net_g.load_state_dict(cpt["weight"], strict=False) - input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"] - output_names = [ - "audio", - ] - # net_g.construct_spkmixmap(n_speaker) 多角色混合轨道导出 - torch.onnx.export( - net_g, - ( - test_phone.to(device), - test_phone_lengths.to(device), - test_pitch.to(device), - test_pitchf.to(device), - test_ds.to(device), - test_rnd.to(device), - ), - ExportedPath, - dynamic_axes={ - "phone": [1], - "pitch": [1], - 
"pitchf": [1], - "rnd": [2], - }, - do_constant_folding=False, - opset_version=16, - verbose=False, - input_names=input_names, - output_names=output_names, - ) - return "Finished" - -#region RVC WebUI App - -def get_presets(): - data = None - with open('../inference-presets.json', 'r') as file: - data = json.load(file) - preset_names = [] - for preset in data['presets']: - preset_names.append(preset['name']) - - return preset_names - -def change_choices2(): - audio_files=[] - for filename in os.listdir("./audios"): - if filename.endswith(('.wav','.mp3','.ogg','.flac','.m4a','.aac','.mp4')): - audio_files.append(os.path.join('./audios',filename).replace('\\', '/')) - return {"choices": sorted(audio_files), "__type__": "update"}, {"__type__": "update"} - -audio_files=[] -for filename in os.listdir("./audios"): - if filename.endswith(('.wav','.mp3','.ogg','.flac','.m4a','.aac','.mp4')): - audio_files.append(os.path.join('./audios',filename).replace('\\', '/')) - -def get_index(): - if check_for_name() != '': - chosen_model=sorted(names)[0].split(".")[0] - logs_path="./logs/"+chosen_model - if os.path.exists(logs_path): - for file in os.listdir(logs_path): - if file.endswith(".index"): - return os.path.join(logs_path, file) - return '' - else: - return '' - -def get_indexes(): - indexes_list=[] - for dirpath, dirnames, filenames in os.walk("./logs/"): - for filename in filenames: - if filename.endswith(".index"): - indexes_list.append(os.path.join(dirpath,filename)) - if len(indexes_list) > 0: - return indexes_list - else: - return '' - -def get_name(): - if len(audio_files) > 0: - return sorted(audio_files)[0] - else: - return '' - -def save_to_wav(record_button): - if record_button is None: - pass - else: - path_to_file=record_button - new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'.wav' - new_path='./audios/'+new_name - shutil.move(path_to_file,new_path) - return new_path - -def save_to_wav2(dropbox): - file_path=dropbox.name - shutil.move(file_path,'./audios') - return os.path.join('./audios',os.path.basename(file_path)) - -def match_index(sid0): - folder=sid0.split(".")[0] - parent_dir="./logs/"+folder - if os.path.exists(parent_dir): - for filename in os.listdir(parent_dir): - if filename.endswith(".index"): - index_path=os.path.join(parent_dir,filename) - return index_path - else: - return '' - -def check_for_name(): - if len(names) > 0: - return sorted(names)[0] - else: - return '' - -def download_from_url(url, model): - if url == '': - return "URL cannot be left empty." - if model =='': - return "You need to name your model. For example: My-Model" - url = url.strip() - zip_dirs = ["zips", "unzips"] - for directory in zip_dirs: - if os.path.exists(directory): - shutil.rmtree(directory) - os.makedirs("zips", exist_ok=True) - os.makedirs("unzips", exist_ok=True) - zipfile = model + '.zip' - zipfile_path = './zips/' + zipfile - try: - if "drive.google.com" in url: - subprocess.run(["gdown", url, "--fuzzy", "-O", zipfile_path]) - elif "mega.nz" in url: - m = Mega() - m.download_url(url, './zips') - else: - subprocess.run(["wget", url, "-O", zipfile_path]) - for filename in os.listdir("./zips"): - if filename.endswith(".zip"): - zipfile_path = os.path.join("./zips/",filename) - shutil.unpack_archive(zipfile_path, "./unzips", 'zip') - else: - return "No zipfile found." 
- for root, dirs, files in os.walk('./unzips'): - for file in files: - file_path = os.path.join(root, file) - if file.endswith(".index"): - os.mkdir(f'./logs/{model}') - shutil.copy2(file_path,f'./logs/{model}') - elif "G_" not in file and "D_" not in file and file.endswith(".pth"): - shutil.copy(file_path,f'./weights/{model}.pth') - shutil.rmtree("zips") - shutil.rmtree("unzips") - return "Model downloaded, you can go back to the inference page!" - except: - return "ERROR - The download failed. Check if the link is valid." -def success_message(face): - return f'{face.name} has been uploaded.', 'None' -def mouth(size, face, voice, faces): - if size == 'Half': - size = 2 - else: - size = 1 - if faces == 'None': - character = face.name - else: - if faces == 'Ben Shapiro': - character = '/content/wav2lip-HD/inputs/ben-shapiro-10.mp4' - elif faces == 'Andrew Tate': - character = '/content/wav2lip-HD/inputs/tate-7.mp4' - command = "python inference.py " \ - "--checkpoint_path checkpoints/wav2lip.pth " \ - f"--face {character} " \ - f"--audio {voice} " \ - "--pads 0 20 0 0 " \ - "--outfile /content/wav2lip-HD/outputs/result.mp4 " \ - "--fps 24 " \ - f"--resize_factor {size}" - process = subprocess.Popen(command, shell=True, cwd='/content/wav2lip-HD/Wav2Lip-master') - stdout, stderr = process.communicate() - return '/content/wav2lip-HD/outputs/result.mp4', 'Animation completed.' -eleven_voices = ['Adam','Antoni','Josh','Arnold','Sam','Bella','Rachel','Domi','Elli'] -eleven_voices_ids=['pNInz6obpgDQGcFmaJgB','ErXwobaYiN019PkySvjV','TxGEqnHWrfWFTfGW9XjX','VR6AewLTigWG4xSOukaG','yoZ06aMxZJJ28mfd3POQ','EXAVITQu4vr4xnSDxMaL','21m00Tcm4TlvDq8ikWAM','AZnzlk1XvdvUeBnXmlld','MF3mGyEYCl7XYWbV9V6O'] -chosen_voice = dict(zip(eleven_voices, eleven_voices_ids)) - -def stoptraining(mim): - if int(mim) == 1: - try: - CSVutil('csvdb/stop.csv', 'w+', 'stop', 'True') - os.kill(PID, signal.SIGTERM) - except Exception as e: - print(f"Couldn't click due to {e}") - return ( - {"visible": False, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - ) - - -def elevenTTS(xiapi, text, id, lang): - if xiapi!= '' and id !='': - choice = chosen_voice[id] - CHUNK_SIZE = 1024 - url = f"https://api.elevenlabs.io/v1/text-to-speech/{choice}" - headers = { - "Accept": "audio/mpeg", - "Content-Type": "application/json", - "xi-api-key": xiapi - } - if lang == 'en': - data = { - "text": text, - "model_id": "eleven_monolingual_v1", - "voice_settings": { - "stability": 0.5, - "similarity_boost": 0.5 - } - } - else: - data = { - "text": text, - "model_id": "eleven_multilingual_v1", - "voice_settings": { - "stability": 0.5, - "similarity_boost": 0.5 - } - } - - response = requests.post(url, json=data, headers=headers) - with open('./temp_eleven.mp3', 'wb') as f: - for chunk in response.iter_content(chunk_size=CHUNK_SIZE): - if chunk: - f.write(chunk) - aud_path = save_to_wav('./temp_eleven.mp3') - return aud_path, aud_path - else: - tts = gTTS(text, lang=lang) - tts.save('./temp_gTTS.mp3') - aud_path = save_to_wav('./temp_gTTS.mp3') - return aud_path, aud_path - -def upload_to_dataset(files, dir): - if dir == '': - dir = './dataset' - if not os.path.exists(dir): - os.makedirs(dir) - count = 0 - for file in files: - path=file.name - shutil.copy2(path,dir) - count += 1 - return f' {count} files uploaded to {dir}.' - -def zip_downloader(model): - if not os.path.exists(f'./weights/{model}.pth'): - return {"__type__": "update"}, f'Make sure the Voice Name is correct. 
I could not find {model}.pth' - index_found = False - for file in os.listdir(f'./logs/{model}'): - if file.endswith('.index') and 'added' in file: - log_file = file - index_found = True - if index_found: - return [f'./weights/{model}.pth', f'./logs/{model}/{log_file}'], "Done" - else: - return f'./weights/{model}.pth', "Could not find Index file." - -with gr.Blocks(theme=gr.themes.Base (), title='Mangio-RVC-Web 💻') as app: - with gr.Tabs(): - with gr.TabItem("Inference"): - gr.HTML("
Ilaria RVC 💖
") - gr.HTML(" You can find voice models on AI Hub: https://discord.gg/aihub ") - gr.HTML("
Huggingface port by Ilaria of the Rejekt Easy GUI
") - - # Inference Preset Row - # with gr.Row(): - # mangio_preset = gr.Dropdown(label="Inference Preset", choices=sorted(get_presets())) - # mangio_preset_name_save = gr.Textbox( - # label="Your preset name" - # ) - # mangio_preset_save_btn = gr.Button('Save Preset', variant="primary") - - # Other RVC stuff - with gr.Row(): - sid0 = gr.Dropdown(label="1.Choose the model.", choices=sorted(names), value=check_for_name()) - refresh_button = gr.Button("Refresh", variant="primary") - if check_for_name() != '': - get_vc(sorted(names)[0]) - vc_transform0 = gr.Number(label="Pitch: 0 from man to man (or woman to woman); 12 from man to woman and -12 from woman to man.", value=0) - #clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary") - spk_item = gr.Slider( - minimum=0, - maximum=2333, - step=1, - label=i18n("请选择说话人id"), - value=0, - visible=False, - interactive=True, - ) - #clean_button.click(fn=clean, inputs=[], outputs=[sid0]) - sid0.change( - fn=get_vc, - inputs=[sid0], - outputs=[spk_item], - ) - but0 = gr.Button("Convert", variant="primary") - with gr.Row(): - with gr.Column(): - with gr.Row(): - dropbox = gr.File(label="Drag your audio file and click refresh.") - with gr.Row(): - record_button=gr.Audio(source="microphone", label="Or you can use your microphone!", type="filepath") - with gr.Row(): - input_audio0 = gr.Dropdown( - label="2.Choose the audio file.", - value="./audios/Test_Audio.mp3", - choices=audio_files - ) - dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio0]) - dropbox.upload(fn=change_choices2, inputs=[], outputs=[input_audio0]) - refresh_button2 = gr.Button("Refresh", variant="primary", size='sm') - record_button.change(fn=save_to_wav, inputs=[record_button], outputs=[input_audio0]) - record_button.change(fn=change_choices2, inputs=[], outputs=[input_audio0]) - with gr.Row(): - with gr.Accordion('Text To Speech', open=False): - with gr.Column(): - lang = gr.Radio(label='Chinese & Japanese do not work with ElevenLabs currently.',choices=['en','es','fr','pt','zh-CN','de','hi','ja'], value='en') - api_box = gr.Textbox(label="Enter your API Key for ElevenLabs, or leave empty to use GoogleTTS", value='') - elevenid=gr.Dropdown(label="Voice:", choices=eleven_voices) - with gr.Column(): - tfs = gr.Textbox(label="Input your Text", interactive=True, value="This is a test.") - tts_button = gr.Button(value="Speak") - tts_button.click(fn=elevenTTS, inputs=[api_box,tfs, elevenid, lang], outputs=[record_button, input_audio0]) - with gr.Row(): - with gr.Accordion('Wav2Lip', open=False): - with gr.Row(): - size = gr.Radio(label='Resolution:',choices=['Half','Full']) - face = gr.UploadButton("Upload A Character",type='file') - faces = gr.Dropdown(label="OR Choose one:", choices=['None','Ben Shapiro','Andrew Tate']) - with gr.Row(): - preview = gr.Textbox(label="Status:",interactive=False) - face.upload(fn=success_message,inputs=[face], outputs=[preview, faces]) - with gr.Row(): - animation = gr.Video(type='filepath') - refresh_button2.click(fn=change_choices2, inputs=[], outputs=[input_audio0, animation]) - with gr.Row(): - animate_button = gr.Button('Animate') - - with gr.Column(): - with gr.Accordion("Index Settings", open=False): - file_index1 = gr.Dropdown( - label="3. 
Choose the index file (in case it wasn't automatically found.)", - choices=get_indexes(), - value=get_index(), - interactive=True, - ) - sid0.change(fn=match_index, inputs=[sid0],outputs=[file_index1]) - refresh_button.click( - fn=change_choices, inputs=[], outputs=[sid0, file_index1] - ) - # file_big_npy1 = gr.Textbox( - # label=i18n("特征文件路径"), - # value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", - # interactive=True, - # ) - index_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("检索特征占比"), - value=0.66, - interactive=True, - ) - vc_output2 = gr.Audio( - label="Final Result! (Click on the three dots to download the audio)", - type='filepath', - interactive=False, - ) - animate_button.click(fn=mouth, inputs=[size, face, vc_output2, faces], outputs=[animation, preview]) - with gr.Accordion("Advanced Options", open=False): - f0method0 = gr.Radio( - label="Optional: Change the Pitch Extraction Algorithm. Extraction methods are sorted from 'worst quality' to 'best quality'. If you don't know what you're doing, leave rmvpe.", - choices=["pm", "dio", "crepe-tiny", "mangio-crepe-tiny", "crepe", "harvest", "mangio-crepe", "rmvpe"], # Fork Feature. Add Crepe-Tiny - value="rmvpe", - interactive=True, - ) - - crepe_hop_length = gr.Slider( - minimum=1, - maximum=512, - step=1, - label="Mangio-Crepe Hop Length. Higher numbers will reduce the chance of extreme pitch changes but lower numbers will increase accuracy. 64-192 is a good range to experiment with.", - value=120, - interactive=True, - visible=False, - ) - f0method0.change(fn=whethercrepeornah, inputs=[f0method0], outputs=[crepe_hop_length]) - filter_radius0 = gr.Slider( - minimum=0, - maximum=7, - label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), - value=3, - step=1, - interactive=True, - ) - resample_sr0 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("后处理重采样至最终采样率,0为不进行重采样"), - value=0, - step=1, - interactive=True, - visible=False - ) - rms_mix_rate0 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), - value=0.21, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n("保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"), - value=0.33, - step=0.01, - interactive=True, - ) - formanting = gr.Checkbox( - value=bool(DoFormant), - label="[EXPERIMENTAL] Formant shift inference audio", - info="Used for male to female and vice-versa conversions", - interactive=True, - visible=True, - ) - - formant_preset = gr.Dropdown( - value='', - choices=get_fshift_presets(), - label="browse presets for formanting", - visible=bool(DoFormant), - ) - formant_refresh_button = gr.Button( - value='\U0001f504', - visible=bool(DoFormant), - variant='primary', - ) - #formant_refresh_button = ToolButton( elem_id='1') - #create_refresh_button(formant_preset, lambda: {"choices": formant_preset}, "refresh_list_shiftpresets") - - qfrency = gr.Slider( - value=Quefrency, - info="Default value is 1.0", - label="Quefrency for formant shifting", - minimum=0.0, - maximum=16.0, - step=0.1, - visible=bool(DoFormant), - interactive=True, - ) - tmbre = gr.Slider( - value=Timbre, - info="Default value is 1.0", - label="Timbre for formant shifting", - minimum=0.0, - maximum=16.0, - step=0.1, - visible=bool(DoFormant), - interactive=True, - ) - - formant_preset.change(fn=preset_apply, inputs=[formant_preset, qfrency, tmbre], outputs=[qfrency, tmbre]) - frmntbut = gr.Button("Apply", variant="primary", visible=bool(DoFormant)) - 
formanting.change(fn=formant_enabled,inputs=[formanting,qfrency,tmbre,frmntbut,formant_preset,formant_refresh_button],outputs=[formanting,qfrency,tmbre,frmntbut,formant_preset,formant_refresh_button]) - frmntbut.click(fn=formant_apply,inputs=[qfrency, tmbre], outputs=[qfrency, tmbre]) - formant_refresh_button.click(fn=update_fshift_presets,inputs=[formant_preset, qfrency, tmbre],outputs=[formant_preset, qfrency, tmbre]) - with gr.Row(): - vc_output1 = gr.Textbox("") - f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"), visible=False) - - but0.click( - vc_single, - [ - spk_item, - input_audio0, - vc_transform0, - f0_file, - f0method0, - file_index1, - # file_index2, - # file_big_npy1, - index_rate1, - filter_radius0, - resample_sr0, - rms_mix_rate0, - protect0, - crepe_hop_length - ], - [vc_output1, vc_output2], - ) - - with gr.Accordion("Batch Conversion",open=False): - with gr.Row(): - with gr.Column(): - vc_transform1 = gr.Number( - label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 - ) - opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt") - f0method1 = gr.Radio( - label=i18n( - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU" - ), - choices=["pm", "harvest", "crepe", "rmvpe"], - value="rmvpe", - interactive=True, - ) - filter_radius1 = gr.Slider( - minimum=0, - maximum=7, - label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), - value=3, - step=1, - interactive=True, - ) - with gr.Column(): - file_index3 = gr.Textbox( - label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), - value="", - interactive=True, - ) - file_index4 = gr.Dropdown( - label=i18n("自动检测index路径,下拉式选择(dropdown)"), - choices=sorted(index_paths), - interactive=True, - ) - refresh_button.click( - fn=lambda: change_choices()[1], - inputs=[], - outputs=file_index4, - ) - # file_big_npy2 = gr.Textbox( - # label=i18n("特征文件路径"), - # value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy", - # interactive=True, - # ) - index_rate2 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("检索特征占比"), - value=1, - interactive=True, - ) - with gr.Column(): - resample_sr1 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("后处理重采样至最终采样率,0为不进行重采样"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), - value=1, - interactive=True, - ) - protect1 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n( - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果" - ), - value=0.33, - step=0.01, - interactive=True, - ) - with gr.Column(): - dir_input = gr.Textbox( - label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"), - value="E:\codes\py39\\test-20230416b\\todo-songs", - ) - inputs = gr.File( - file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") - ) - with gr.Row(): - format1 = gr.Radio( - label=i18n("导出文件格式"), - choices=["wav", "flac", "mp3", "m4a"], - value="flac", - interactive=True, - ) - but1 = gr.Button(i18n("转换"), variant="primary") - vc_output3 = gr.Textbox(label=i18n("输出信息")) - but1.click( - vc_multi, - [ - spk_item, - dir_input, - opt_input, - inputs, - vc_transform1, - f0method1, - file_index3, - file_index4, - # file_big_npy2, - index_rate2, - filter_radius1, - resample_sr1, - rms_mix_rate1, - protect1, - format1, - crepe_hop_length, - ], - [vc_output3], - ) - but1.click(fn=lambda: easy_uploader.clear()) - with gr.TabItem("Download Voice Models"): - with gr.Row(): - url=gr.Textbox(label="Huggingface Link:") - with gr.Row(): - model = gr.Textbox(label="Name of the model (without spaces):") - 
download_button=gr.Button("Download") - with gr.Row(): - status_bar=gr.Textbox(label="") - download_button.click(fn=download_from_url, inputs=[url, model], outputs=[status_bar]) - with gr.Row(): - gr.Markdown( - """ - Made with 💖 by Ilaria | Support her on [Ko-Fi](https://ko-fi.com/ilariaowo) - """ - ) - - def has_two_files_in_pretrained_folder(): - pretrained_folder = "./pretrained/" - if not os.path.exists(pretrained_folder): - return False - - files_in_folder = os.listdir(pretrained_folder) - num_files = len(files_in_folder) - return num_files >= 2 - - if has_two_files_in_pretrained_folder(): - print("Pretrained weights are downloaded. Training tab enabled!\n-------------------------------") - with gr.TabItem("Train", visible=False): - with gr.Row(): - with gr.Column(): - exp_dir1 = gr.Textbox(label="Voice Name:", value="My-Voice") - sr2 = gr.Radio( - label=i18n("目标采样率"), - choices=["40k", "48k"], - value="40k", - interactive=True, - visible=False - ) - if_f0_3 = gr.Radio( - label=i18n("模型是否带音高指导(唱歌一定要, 语音可以不要)"), - choices=[True, False], - value=True, - interactive=True, - visible=False - ) - version19 = gr.Radio( - label="RVC version", - choices=["v1", "v2"], - value="v2", - interactive=True, - visible=False, - ) - np7 = gr.Slider( - minimum=0, - maximum=config.n_cpu, - step=1, - label="# of CPUs for data processing (Leave as it is)", - value=config.n_cpu, - interactive=True, - visible=True - ) - trainset_dir4 = gr.Textbox(label="Path to your dataset (audios, not zip):", value="./dataset") - easy_uploader = gr.Files(label='OR Drop your audios here. They will be uploaded in your dataset path above.',file_types=['audio']) - but1 = gr.Button("1. Process The Dataset", variant="primary") - info1 = gr.Textbox(label="Status (wait until it says 'end preprocess'):", value="") - easy_uploader.upload(fn=upload_to_dataset, inputs=[easy_uploader, trainset_dir4], outputs=[info1]) - but1.click( - preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1] - ) - with gr.Column(): - spk_id5 = gr.Slider( - minimum=0, - maximum=4, - step=1, - label=i18n("请指定说话人id"), - value=0, - interactive=True, - visible=False - ) - with gr.Accordion('GPU Settings', open=False, visible=False): - gpus6 = gr.Textbox( - label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"), - value=gpus, - interactive=True, - visible=False - ) - gpu_info9 = gr.Textbox(label=i18n("显卡信息"), value=gpu_info) - f0method8 = gr.Radio( - label=i18n( - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢" - ), - choices=["harvest","crepe", "mangio-crepe", "rmvpe"], # Fork feature: Crepe on f0 extraction for training. - value="rmvpe", - interactive=True, - ) - - extraction_crepe_hop_length = gr.Slider( - minimum=1, - maximum=512, - step=1, - label=i18n("crepe_hop_length"), - value=128, - interactive=True, - visible=False, - ) - f0method8.change(fn=whethercrepeornah, inputs=[f0method8], outputs=[extraction_crepe_hop_length]) - but2 = gr.Button("2. 
Pitch Extraction", variant="primary") - info2 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=8) - but2.click( - extract_f0_feature, - [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length], - [info2], - ) - with gr.Row(): - with gr.Column(): - total_epoch11 = gr.Slider( - minimum=1, - maximum=5000, - step=10, - label="Total # of training epochs (IF you choose a value too high, your model will sound horribly overtrained.):", - value=250, - interactive=True, - ) - butstop = gr.Button( - "Stop Training", - variant='primary', - visible=False, - ) - but3 = gr.Button("3. Train Model", variant="primary", visible=True) - - but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)], outputs=[but3, butstop]) - butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)], outputs=[butstop, but3]) - - - but4 = gr.Button("4.Train Index", variant="primary") - info3 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=10) - with gr.Accordion("Training Preferences (You can leave these as they are)", open=False): - #gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引")) - with gr.Column(): - save_epoch10 = gr.Slider( - minimum=1, - maximum=200, - step=1, - label="Backup every X amount of epochs:", - value=10, - interactive=True, - ) - batch_size12 = gr.Slider( - minimum=1, - maximum=40, - step=1, - label="Batch Size (LEAVE IT unless you know what you're doing!):", - value=default_batch_size, - interactive=True, - ) - if_save_latest13 = gr.Checkbox( - label="Save only the latest '.ckpt' file to save disk space.", - value=True, - interactive=True, - ) - if_cache_gpu17 = gr.Checkbox( - label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement.", - value=False, - interactive=True, - ) - if_save_every_weights18 = gr.Checkbox( - label="Save a small final model to the 'weights' folder at each save point.", - value=True, - interactive=True, - ) - zip_model = gr.Button('5. 
Download Model') - zipped_model = gr.Files(label='Your Model and Index file can be downloaded here:') - zip_model.click(fn=zip_downloader, inputs=[exp_dir1], outputs=[zipped_model, info3]) - with gr.Group(): - with gr.Accordion("Base Model Locations:", open=False, visible=False): - pretrained_G14 = gr.Textbox( - label=i18n("加载预训练底模G路径"), - value="pretrained_v2/f0G40k.pth", - interactive=True, - ) - pretrained_D15 = gr.Textbox( - label=i18n("加载预训练底模D路径"), - value="pretrained_v2/f0D40k.pth", - interactive=True, - ) - gpus16 = gr.Textbox( - label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"), - value=gpus, - interactive=True, - ) - sr2.change( - change_sr2, - [sr2, if_f0_3, version19], - [pretrained_G14, pretrained_D15, version19], - ) - version19.change( - change_version19, - [sr2, if_f0_3, version19], - [pretrained_G14, pretrained_D15], - ) - if_f0_3.change( - change_f0, - [if_f0_3, sr2, version19], - [f0method8, pretrained_G14, pretrained_D15], - ) - but5 = gr.Button(i18n("一键训练"), variant="primary", visible=False) - but3.click( - click_train, - [ - exp_dir1, - sr2, - if_f0_3, - spk_id5, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, - ], - [ - info3, - butstop, - but3, - ], - ) - but4.click(train_index, [exp_dir1, version19], info3) - but5.click( - train1key, - [ - exp_dir1, - sr2, - if_f0_3, - trainset_dir4, - spk_id5, - np7, - f0method8, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, - extraction_crepe_hop_length - ], - info3, - ) - - else: - print( - "Pretrained weights not downloaded. Disabling training tab.\n" - "Wondering how to train a voice? Visit here for the RVC model training guide: https://t.ly/RVC_Training_Guide\n" - "-------------------------------\n" - ) - - app.queue(concurrency_count=511, max_size=1022).launch(share=False, quiet=True) -#endregion \ No newline at end of file diff --git a/spaces/ThirdEyeData/Price_Optimization/app.py b/spaces/ThirdEyeData/Price_Optimization/app.py deleted file mode 100644 index d5a5f9c80f30a6bb86f937c9e30f07bcf6714c89..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Price_Optimization/app.py +++ /dev/null @@ -1,54 +0,0 @@ -import numpy as np -import math -import matplotlib.pyplot as plt -import seaborn as sns -plt.style.use('seaborn-white') -import pandas as pd -from matplotlib import animation, rc -import torch.nn.functional as F -import torch -import torch.nn as nn -import torch.optim as optim -plt.rcParams.update({'pdf.fonttype': 'truetype'}) -import pickle -pc2 = pickle.load(open('price.pkl','rb')) -from PIL import Image -import streamlit as st - -st.title("Price Optimization") - -def to_tensor(x): - return torch.from_numpy(np.array(x).astype(np.float32)) -def prediction(price_max,price_step,policy_net): - price_grid = np.arange(price_step, price_max, price_step) - sample_state = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., \ - 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] 
- Q_s = policy_net(to_tensor(sample_state)) - a_opt = Q_s.max(0)[1].detach() - plt.figure(figsize=(16, 5)) - plt.xlabel("Price action ($)") - plt.ylabel("Q ($)") - plt.bar(price_grid, Q_s.detach().numpy(), color='crimson', width=6, alpha=0.8) - plt.savefig('price.png') - return price_grid[a_opt] - -def fun(): - st.header("Optimal Price Action") - st.subheader(str(a)) - - return -st.header("Enter the Specification") -max_value = st.number_input('Enter the Maximum Value of Price',min_value=50,value = 500,step=1) -step = st.number_input('Enter the Price step',min_value = 10,value = 10,step=1) -a = prediction(max_value,step,pc2) -if st.button('Predict'): - fun() - image = Image.open('price.png') - st.image(image,caption = 'Price Optimization',width =1000) - - - - - - - \ No newline at end of file diff --git a/spaces/ThomasSimonini/Huggy/TemplateData/style.css b/spaces/ThomasSimonini/Huggy/TemplateData/style.css deleted file mode 100644 index cdc3477fb8c1c824db96f451631bca7cde305923..0000000000000000000000000000000000000000 --- a/spaces/ThomasSimonini/Huggy/TemplateData/style.css +++ /dev/null @@ -1,105 +0,0 @@ -html { - box-sizing: border-box; -} -*, *:before, *:after { - box-sizing: inherit; -} -html, body { - height: 100%; -} -canvas { - display: block; -} -body { - margin: 0; -} -#unity-container { - width: 100%; - height: 100%; -} -#unity-canvas { - width: 100%; - height: 100%; - background: #231F20; -} -#loading-cover { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - display: flex; - justify-content: center; - align-items: center; -} -#unity-loading-bar { - flex: 1 1 auto; - display: flex; - flex-direction: column; - justify-content: center; - align-items: center; -} -#unity-logo { - text-align: center; -} -#unity-logo img { - max-width: 80%; -} -#unity-progress-bar-empty { - width: 80%; - height: 24px; - margin: 10px 20px 20px 10px; - text-align: left; - border: 1px solid white; - padding: 2px; -} -#unity-progress-bar-full { - width: 0%; - height: 100%; - background: #ffd21e; -} -.light #unity-progress-bar-empty { - border-color: black; -} -.light #unity-progress-bar-full { - background: black; -} - -#unity-fullscreen-button { - position: absolute; - right: 10px; - bottom: 10px; - width: 38px; - height: 38px; - background: url('fullscreen-button.png') no-repeat center; - background-size: contain; -} - -.spinner, -.spinner:after { - border-radius: 50%; - width: 5em; - height: 5em; -} -.spinner { - margin: 10px; - font-size: 10px; - position: relative; - text-indent: -9999em; - border-top: 1.1em solid rgba(255, 255, 255, 0.2); - border-right: 1.1em solid rgba(255, 255, 255, 0.2); - border-bottom: 1.1em solid rgba(255, 255, 255, 0.2); - border-left: 1.1em solid #ffffff; - transform: translateZ(0); - animation: spinner-spin 1.1s infinite linear; -} -@keyframes spinner-spin { - 0% { - transform: rotate(0deg); - } - 100% { - transform: rotate(360deg); - } -} - - diff --git a/spaces/UjjwalVIT/Text_analysis_and_metadata_app/app_utils.py b/spaces/UjjwalVIT/Text_analysis_and_metadata_app/app_utils.py deleted file mode 100644 index 139ff58cb198122e04fc66efba9091ab0af4f7b5..0000000000000000000000000000000000000000 --- a/spaces/UjjwalVIT/Text_analysis_and_metadata_app/app_utils.py +++ /dev/null @@ -1,210 +0,0 @@ -import streamlit as st -import pandas as pd -import streamlit.components.v1 as stc -import nltk - -# NLP Package-used for text analysis -import nltk -nltk.download('all') -from sumy.parsers.plaintext import PlaintextParser -from nltk.tokenize import word_tokenize 
-from nltk.tag import pos_tag -from nltk.stem import WordNetLemmatizer -from sumy.summarizers.lex_rank import LexRankSummarizer -from sumy.summarizers.text_rank import TextRankSummarizer -from nltk.corpus import stopwords -from nltk.tokenize import sent_tokenize -from sumy.nlp.tokenizers import Tokenizer -from rouge import Rouge -from transformers import BartForConditionalGeneration, BartTokenizer -from transformers import T5ForConditionalGeneration, T5Tokenizer -from transformers import AutoTokenizer, AutoModelForTokenClassification,pipeline - -# from nltk import ne_chunk -from nltk.tag import StanfordNERTagger - -from collections import Counter - -from textblob import TextBlob -import seaborn as sns -import matplotlib.pyplot as plt - -from wordcloud import WordCloud - -import base64 -import time - -# stanford_ner_jar_path = 'stanford_model/stanford-ner.jar' -# # Path to the pre-trained NER model file -# stanford_ner_model_path ='stanford_model/english.all.3class.distsim.crf.ser.gz' - -timestr = time.strftime("%Y%m%d-%H%M%S") - - -# from spacy import displacy - - -#Text cleaning packages -# removing stopwords, removing special characters, removing URLs, normalizing text, removing HTML tags, correcting common spelling mistakes, -import neattext as nt -import neattext.functions as nfx - - -HTML_WRAPPER = """
{} -
-""" - -def evaluate_summary(summary,reference): - r=Rouge() - eval_score=r.get_scores(summary,reference) - eval_score_df=pd.DataFrame(eval_score[0]) - return eval_score_df - - -def bart_summary(docx): - model=BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') - tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') - inputs = tokenizer.batch_encode_plus([docx], truncation=True, padding='longest', max_length=1024, return_tensors='pt') - summary_ids = model.generate(inputs['input_ids'], num_beams=6, max_length=100, early_stopping=True) - summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True) - return summary - -def T5_summary(docx): - model = T5ForConditionalGeneration.from_pretrained('t5-base') - tokenizer = T5Tokenizer.from_pretrained('t5-base') - input_text = "summarize: " + docx - input_ids = tokenizer.encode(input_text, return_tensors='pt') - summary_ids = model.generate(input_ids, max_length=100, num_beams=4, early_stopping=True) - summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True) - return summary - -def sumy_summarizer(docx,num=5): - parser=PlaintextParser.from_string(docx,Tokenizer("english")) - lex_summ=LexRankSummarizer() - summary=lex_summ(parser.document,sentences_count= num) - summary_list=[str(sentence) for sentence in summary] - result=' '.join(summary_list) - return result - -def sumy_text_summarizer(docx, num=5): - parser = PlaintextParser.from_string(docx, Tokenizer("english")) - text_rank_summarizer = TextRankSummarizer() - summary = text_rank_summarizer(parser.document, sentences_count=num) - summary_list = [str(sentence) for sentence in summary] - result = ' '.join(summary_list) - return result - - -def nlp_analysis(text): - token_data = [] - tokens=word_tokenize(text) - tagged_tokens = pos_tag(tokens) #categorize into nouns, verbs, adjectives, adverbs, pronouns etc - stop_words = set(stopwords.words('english')) #check for words like a", "an", "the", "is", "in" - lemmatizer = WordNetLemmatizer() #preprocessing - for token in tagged_tokens: - token_text=token[0] - token_shape = None - token_pos = token[1] # "," - Comma CC - Coordinating conjunction DT - Determiner NN - Noun VBD - Past tense verb PRP - Personal pronoun VBD - Past tense verb - token_lemma = lemmatizer.lemmatize(token_text) - token_is_alpha = token_text.isalpha() - token_is_stop = token_text.lower() in stop_words - token_data.append([token_text,token_shape,token_pos,token_lemma,token_is_alpha,token_is_stop]) - df=pd.DataFrame(token_data,columns=['Token','Shape','Position','lemma','Contains_Alphabets','Contains_Stop_words']) - return df - - - -def find_entities(text): - tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") - model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") - nlp = pipeline("ner", model=model, tokenizer=tokenizer) - e=nlp(text) - entities=[(entity["word"], entity["entity"]) for entity in e] - entities=HTML_WRAPPER.format(entities) - return entities - - - - - -def file_download(data): - csv_file= data.to_csv() - b64=base64.b64encode(csv_file.encode()).decode() - new_filename="result_{}.csv".format(timestr) - st.markdown('### 🗃️ Download csv file ') - href=f' Click Here! 
' - st.markdown(href, unsafe_allow_html=True) - -def get_most_common_tokens(text): - word_tokens=Counter(text.split()) - most_common=dict(word_tokens.most_common(len(text))) - return most_common - - -def get_semantics(text): - blob=TextBlob(text) - sentiment=blob.sentiment - return sentiment - -def plot_wordcloud(text): - text_workcloud= WordCloud().generate(text) #size indicates its frequency - fig=plt.figure() - plt.imshow(text_workcloud,interpolation='bilinear') - plt.axis('off') - st.pyplot(fig) - -def pos_tags(text): - blob=TextBlob(text) - tagged_text=blob.tags - tagged_df=pd.DataFrame(tagged_text,columns=['tokens','tags']) - return tagged_df - -TAGS = { - 'NN' : 'green', - 'NNS' : 'green', - 'NNP' : 'green', - 'NNPS' : 'green', - 'VB' : 'blue', - 'VBD' : 'blue', - 'VBG' : 'blue', - 'VBN' : 'blue', - 'VBP' : 'blue', - 'VBZ' : 'blue', - 'JJ' : 'red', - 'JJR' : 'red', - 'JJS' : 'red', - 'RB' : 'cyan', - 'RBR' : 'cyan', - 'RBS' : 'cyan', - 'IN' : 'darkwhite', - 'POS' : 'darkyellow', - 'PRP$' : 'magenta', - 'PRP$' : 'magenta', - 'DET' : 'black', - 'CC' : 'black', - 'CD' : 'black', - 'WDT' : 'black', - 'WP' : 'black', - 'WP$' : 'black', - 'WRB' : 'black', - 'EX' : 'yellow', - 'FW' : 'yellow', - 'LS' : 'yellow', - 'MD' : 'yellow', - 'PDT' : 'yellow', - 'RP' : 'yellow', - 'SYM' : 'yellow', - 'TO' : 'yellow', - 'None' : 'off' - } - -def tag_visualize(tagged_df): - colored_text=[] - for i in tagged_df: - if i[1] in TAGS.keys(): - token=i[0] - color_of_text=TAGS.get(i[1]) - changed_text='{}'.format(color_of_text,token) - colored_text.append(changed_text) - result=''.join(colored_text) - return result \ No newline at end of file diff --git a/spaces/Vedarutvija/Veda_Audio_To_Text/README.md b/spaces/Vedarutvija/Veda_Audio_To_Text/README.md deleted file mode 100644 index ba3ca7fe48b28782bfbd68edf9b768424fd6f6a9..0000000000000000000000000000000000000000 --- a/spaces/Vedarutvija/Veda_Audio_To_Text/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Llama -emoji: 🦀 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.38.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Vision-CAIR/MiniGPT-v2/LICENSE_Lavis.md b/spaces/Vision-CAIR/MiniGPT-v2/LICENSE_Lavis.md deleted file mode 100644 index 9ba97919e5b9568c8b9c42ea85251f01049a220e..0000000000000000000000000000000000000000 --- a/spaces/Vision-CAIR/MiniGPT-v2/LICENSE_Lavis.md +++ /dev/null @@ -1,14 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2022 Salesforce, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/spaces/Workhack/chatgpt-prompt-playground/index.html b/spaces/Workhack/chatgpt-prompt-playground/index.html deleted file mode 100644 index f496e7faf778e896c5a10e8ad04dfb039c1bd3ae..0000000000000000000000000000000000000000 --- a/spaces/Workhack/chatgpt-prompt-playground/index.html +++ /dev/null @@ -1 +0,0 @@ -Prompts Playground
\ No newline at end of file diff --git a/spaces/Xenova/text-to-speech-client/assets/index-5644c887.css b/spaces/Xenova/text-to-speech-client/assets/index-5644c887.css deleted file mode 100644 index a5e21b3c7de305d425a0a5bb9d399030308004ed..0000000000000000000000000000000000000000 --- a/spaces/Xenova/text-to-speech-client/assets/index-5644c887.css +++ /dev/null @@ -1 +0,0 @@ -*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";font-feature-settings:normal;font-variation-settings:normal}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}*,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: 
;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.static{position:static}.absolute{position:absolute}.relative{position:relative}.left-0{left:0}.top-0{top:0}.z-10{z-index:10}.z-50{z-index:50}.m-2{margin:.5rem}.my-4{margin-top:1rem;margin-bottom:1rem}.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-4{margin-bottom:1rem}.block{display:block}.flex{display:flex}.h-14{height:3.5rem}.h-full{height:100%}.min-h-screen{min-height:100vh}.w-\[1\%\]{width:1%}.w-full{width:100%}.max-w-xl{max-width:36rem}.cursor-not-allowed{cursor:not-allowed}.flex-col{flex-direction:column}.items-center{align-items:center}.justify-center{justify-content:center}.gap-1{gap:.25rem}.overflow-hidden{overflow:hidden}.whitespace-nowrap{white-space:nowrap}.rounded-lg{border-radius:.5rem}.rounded-md{border-radius:.375rem}.border{border-width:1px}.border-gray-300{--tw-border-opacity: 1;border-color:rgb(209 213 219 / var(--tw-border-opacity))}.bg-blue-500{--tw-bg-opacity: 1;background-color:rgb(59 130 246 / var(--tw-bg-opacity))}.bg-gray-100{--tw-bg-opacity: 1;background-color:rgb(243 244 246 / var(--tw-bg-opacity))}.bg-gray-400{--tw-bg-opacity: 1;background-color:rgb(156 163 175 / var(--tw-bg-opacity))}.bg-white{--tw-bg-opacity: 1;background-color:rgb(255 255 255 / var(--tw-bg-opacity))}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-8{padding:2rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-4{padding-left:1rem;padding-right:1rem}.px-8{padding-left:2rem;padding-right:2rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.text-left{text-align:left}.text-center{text-align:center}.text-3xl{font-size:1.875rem;line-height:2.25rem}.text-base{font-size:1rem;line-height:1.5rem}.text-sm{font-size:.875rem;line-height:1.25rem}.text-xl{font-size:1.25rem;line-height:1.75rem}.font-medium{font-weight:500}.font-semibold{font-weight:600}.text-black{--tw-text-opacity: 1;color:rgb(0 0 0 / var(--tw-text-opacity))}.text-gray-600{--tw-text-opacity: 1;color:rgb(75 85 99 / var(--tw-text-opacity))}.text-gray-700{--tw-text-opacity: 1;color:rgb(55 65 81 / var(--tw-text-opacity))}.text-gray-800{--tw-text-opacity: 1;color:rgb(31 41 55 / var(--tw-text-opacity))}.text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity))}.shadow-lg{--tw-shadow: 0 10px 15px -3px rgb(0 0 0 / .1), 0 4px 6px -4px rgb(0 0 0 / 
.1);--tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color), 0 4px 6px -4px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.shadow-xl{--tw-shadow: 0 20px 25px -5px rgb(0 0 0 / .1), 0 8px 10px -6px rgb(0 0 0 / .1);--tw-shadow-colored: 0 20px 25px -5px var(--tw-shadow-color), 0 8px 10px -6px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.shadow-black\/5{--tw-shadow-color: rgb(0 0 0 / .05);--tw-shadow: var(--tw-shadow-colored)}.ring-1{--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}.ring-slate-700\/10{--tw-ring-color: rgb(51 65 85 / .1)}.blur{--tw-blur: blur(8px);filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.filter{filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.transition-all{transition-property:all;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}:root{font-family:Inter,system-ui,Avenir,Helvetica,Arial,sans-serif;line-height:1.5;font-weight:400;color:#213547;background-color:#fff;font-synthesis:none;text-rendering:optimizeLegibility;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;-webkit-text-size-adjust:100%}audio::-webkit-media-controls-panel{background-color:#fff}.hover\:bg-blue-600:hover{--tw-bg-opacity: 1;background-color:rgb(37 99 235 / var(--tw-bg-opacity))} diff --git a/spaces/XzJosh/nine1-Bert-VITS2/utils.py b/spaces/XzJosh/nine1-Bert-VITS2/utils.py deleted file mode 100644 index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/nine1-Bert-VITS2/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - elif optimizer is None and not skip_optimizer: - #else: #Disable this line if Infer ,and enable the line upper - new_opt_dict = optimizer.state_dict() - new_opt_dict_params = new_opt_dict['param_groups'][0]['params'] - new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups'] - new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params - optimizer.load_state_dict(new_opt_dict) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - 
#assert "emb_g" not in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - 
return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL", - help='Model name') - parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.cont = args.cont - return hparams - - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time -- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - import re - ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], - key=sort_key) - to_del = [os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/YazawaSunrise/so-vits-svc-LoveLive/inference/__init__.py b/spaces/YazawaSunrise/so-vits-svc-LoveLive/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/ddpm/pipeline_ddpm.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/ddpm/pipeline_ddpm.py deleted file mode 100644 index 114a38a5fec7a471ed60be1c38ace65f86c903dd..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/ddpm/pipeline_ddpm.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import Optional, Tuple, Union - -import torch - -from ...configuration_utils import FrozenDict -from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput -from ...utils import deprecate - - -class DDPMPipeline(DiffusionPipeline): - r""" - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Parameters: - unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of - [`DDPMScheduler`], or [`DDIMScheduler`]. 
- """ - - def __init__(self, unet, scheduler): - super().__init__() - self.register_modules(unet=unet, scheduler=scheduler) - - @torch.no_grad() - def __call__( - self, - batch_size: int = 1, - generator: Optional[torch.Generator] = None, - num_inference_steps: int = 1000, - output_type: Optional[str] = "pil", - return_dict: bool = True, - **kwargs, - ) -> Union[ImagePipelineOutput, Tuple]: - r""" - Args: - batch_size (`int`, *optional*, defaults to 1): - The number of images to generate. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - num_inference_steps (`int`, *optional*, defaults to 1000): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if - `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the - generated images. - """ - message = ( - "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler =" - " DDPMScheduler.from_pretrained(, prediction_type='epsilon')`." - ) - predict_epsilon = deprecate("predict_epsilon", "0.11.0", message, take_from=kwargs) - - if predict_epsilon is not None: - new_config = dict(self.scheduler.config) - new_config["prediction_type"] = "epsilon" if predict_epsilon else "sample" - self.scheduler._internal_dict = FrozenDict(new_config) - - if generator is not None and generator.device.type != self.device.type and self.device.type != "mps": - message = ( - f"The `generator` device is `{generator.device}` and does not match the pipeline " - f"device `{self.device}`, so the `generator` will be ignored. " - f'Please use `torch.Generator(device="{self.device}")` instead.' - ) - deprecate( - "generator.device == 'cpu'", - "0.11.0", - message, - ) - generator = None - - # Sample gaussian noise to begin loop - if isinstance(self.unet.sample_size, int): - image_shape = (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size) - else: - image_shape = (batch_size, self.unet.in_channels, *self.unet.sample_size) - - if self.device.type == "mps": - # randn does not work reproducibly on mps - image = torch.randn(image_shape, generator=generator) - image = image.to(self.device) - else: - image = torch.randn(image_shape, generator=generator, device=self.device) - - # set step values - self.scheduler.set_timesteps(num_inference_steps) - - for t in self.progress_bar(self.scheduler.timesteps): - # 1. predict noise model_output - model_output = self.unet(image, t).sample - - # 2. 
compute previous image: x_t -> x_t-1 - image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample - - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).numpy() - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/abdvl/datahub_qa_bot/docs/api/tutorials/adding-tags.md b/spaces/abdvl/datahub_qa_bot/docs/api/tutorials/adding-tags.md deleted file mode 100644 index 0d55a0d5708cfe421ea0a2da0ac8a29146959165..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/api/tutorials/adding-tags.md +++ /dev/null @@ -1,205 +0,0 @@ -# Adding Tags On Datasets/Columns - -## Why Would You Add Tags? -Tags are informal, loosely controlled labels that help in search & discovery. They can be added to datasets, dataset schemas, or containers, for an easy way to label or categorize entities – without having to associate them to a broader business glossary or vocabulary. - -Fore more information about tags, refer to [About DataHub Tags](/docs/tags.md). - -### Goal Of This Guide -This guide will show you how to add a `CustomerAccount` tag to the `user_name` column of a dataset called `fct_users_created`. -Additionally, we will cover how to add a tag to the dataset itself. - -## Prerequisites -For this tutorial, you need to deploy DataHub Quickstart and ingest sample data. -For detailed steps, please refer to [Prepare Local DataHub Environment](/docs/api/tutorials/references/prepare-datahub.md). - -:::note -Before adding tags, you need to ensure the targeted dataset and the tag are already present in your datahub. -If you attempt to manipulate entities that do not exist, your operation will fail. -In this guide, we will be using data from a sample ingestion. -If you want to know how to create tags using APIs & SDKs, please refer to [Creating Tags](/docs/api/tutorials/creating-tags.md) and [Creating Datasets](/docs/api/tutorials/creating-datasets.md). -. -::: - - -## Add Tags With GraphQL - -:::note -Please note that there are two available endpoints (`:8000`, `:9002`) to access GraphQL. -For more information about the differences between these endpoints, please refer to [DataHub Metadata Service](../../../metadata-service/README.md#graphql-api) -::: -### GraphQL Explorer -GraphQL Explorer is the fastest way to experiment with GraphQL without any dependancies. -Navigate to GraphQL Explorer (`http://localhost:9002/api/graphiql`) and run the following query. - -```json -mutation addTags { - addTags( - input: { - tagUrns: ["urn:li:tag:Legacy"], - resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)", - subResourceType:DATASET_FIELD, - subResource:"user_name"}) -} -``` - -Note that you can also add a tag on a dataset if you don't specify `subResourceType` and `subResource`. -```json -mutation addTags { - addTags( - input: { - tagUrns: ["urn:li:tag:Legacy"], - resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)", - } - ) -} -``` - - -If you see the following response, the operation was successful: -```python -{ - "data": { - "addTags": true - }, - "extensions": {} -} -``` - - -### CURL - -With CURL, you need to provide tokens. To generate a token, please refer to [Generate Access Token](/docs/api/tutorials/references/generate-access-token.md). -With `accessToken`, you can run the following command. 
- -```shell -curl --location --request POST 'http://localhost:8080/api/graphql' \ ---header 'Authorization: Bearer ' \ ---header 'Content-Type: application/json' \ ---data-raw '{ "query": "mutation addTags { addTags(input: { tagUrns: [\"urn:li:tag:Legacy\"], resourceUrn: \"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)\" }) }", "variables":{}}' -``` -Expected Response: -```json -{"data":{"addTags":true},"extensions":{}} -``` - - -## Add Tags With Python SDK - -The following code adds a tag named `Legacy` to the column `user_name` of the hive dataset `fct_users_created`. -You can refer to the full code in [dataset_add_column_tag.py](https://github.com/datahub-project/datahub/blob/master/metadata-ingestion/examples/library/dataset_add_column_tag.py). -```python -# inlined from metadata-ingestion/examples/library/dataset_add_column_tag.py -import logging -import time - -from datahub.emitter.mce_builder import make_dataset_urn, make_tag_urn -from datahub.emitter.mcp import MetadataChangeProposalWrapper - -# read-modify-write requires access to the DataHubGraph (RestEmitter is not enough) -from datahub.ingestion.graph.client import DatahubClientConfig, DataHubGraph - -# Imports for metadata model classes -from datahub.metadata.schema_classes import ( - AuditStampClass, - EditableSchemaFieldInfoClass, - EditableSchemaMetadataClass, - GlobalTagsClass, - TagAssociationClass, -) - -log = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO) - - -def get_simple_field_path_from_v2_field_path(field_path: str) -> str: - """A helper function to extract simple . path notation from the v2 field path""" - if not field_path.startswith("[version=2.0]"): - # not a v2, we assume this is a simple path - return field_path - # this is a v2 field path - tokens = [ - t for t in field_path.split(".") if not (t.startswith("[") or t.endswith("]")) - ] - - return ".".join(tokens) - - -# Inputs -> the column, dataset and the tag to set -column = "user_name" -dataset_urn = make_dataset_urn(platform="hive", name="fct_users_created", env="PROD") -tag_to_add = make_tag_urn("Legacy") - - -# First we get the current editable schema metadata -gms_endpoint = "http://localhost:8080" -graph = DataHubGraph(DatahubClientConfig(server=gms_endpoint)) - - -current_editable_schema_metadata = graph.get_aspect( - entity_urn=dataset_urn, - aspect_type=EditableSchemaMetadataClass, -) - - -# Some pre-built objects to help all the conditional pathways -tag_association_to_add = TagAssociationClass(tag=tag_to_add) -tags_aspect_to_set = GlobalTagsClass(tags=[tag_association_to_add]) -field_info_to_set = EditableSchemaFieldInfoClass( - fieldPath=column, globalTags=tags_aspect_to_set -) - - -need_write = False -field_match = False -if current_editable_schema_metadata: - for fieldInfo in current_editable_schema_metadata.editableSchemaFieldInfo: - if get_simple_field_path_from_v2_field_path(fieldInfo.fieldPath) == column: - # we have some editable schema metadata for this field - field_match = True - if fieldInfo.globalTags: - if tag_to_add not in [x.tag for x in fieldInfo.globalTags.tags]: - # this tag is not present - fieldInfo.globalTags.tags.append(tag_association_to_add) - need_write = True - else: - fieldInfo.globalTags = tags_aspect_to_set - need_write = True - - if not field_match: - # this field isn't present in the editable schema metadata aspect, add it - field_info = field_info_to_set - current_editable_schema_metadata.editableSchemaFieldInfo.append(field_info) - need_write = True - -else: - # create a 
brand new editable schema metadata aspect - now = int(time.time() * 1000) # milliseconds since epoch - current_timestamp = AuditStampClass(time=now, actor="urn:li:corpuser:ingestion") - current_editable_schema_metadata = EditableSchemaMetadataClass( - editableSchemaFieldInfo=[field_info_to_set], - created=current_timestamp, - ) - need_write = True - -if need_write: - event: MetadataChangeProposalWrapper = MetadataChangeProposalWrapper( - entityUrn=dataset_urn, - aspect=current_editable_schema_metadata, - ) - graph.emit(event) - log.info(f"Tag {tag_to_add} added to column {column} of dataset {dataset_urn}") - -else: - log.info(f"Tag {tag_to_add} already attached to column {column}, omitting write") -``` - -We're using the `MetdataChangeProposalWrapper` to change entities in this example. -For more information about the `MetadataChangeProposal`, please refer to [MetadataChangeProposal & MetadataChangeLog Events](/docs/advanced/mcp-mcl.md) - - -## Expected Outcomes -You can now see `CustomerAccount` tag has been added to `user_name` column. - -![tag-added](../../imgs/apis/tutorials/tag-added.png) - diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py deleted file mode 100644 index c735298487e14e4a0ec42913f25673cccb98a8a0..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py +++ /dev/null @@ -1,55 +0,0 @@ -import numpy as np -import torch - -from ..builder import BBOX_SAMPLERS -from .random_sampler import RandomSampler - - -@BBOX_SAMPLERS.register_module() -class InstanceBalancedPosSampler(RandomSampler): - """Instance balanced sampler that samples equal number of positive samples - for each instance.""" - - def _sample_pos(self, assign_result, num_expected, **kwargs): - """Sample positive boxes. - - Args: - assign_result (:obj:`AssignResult`): The assigned results of boxes. - num_expected (int): The number of expected positive samples - - Returns: - Tensor or ndarray: sampled indices. 
- """ - pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) - if pos_inds.numel() != 0: - pos_inds = pos_inds.squeeze(1) - if pos_inds.numel() <= num_expected: - return pos_inds - else: - unique_gt_inds = assign_result.gt_inds[pos_inds].unique() - num_gts = len(unique_gt_inds) - num_per_gt = int(round(num_expected / float(num_gts)) + 1) - sampled_inds = [] - for i in unique_gt_inds: - inds = torch.nonzero( - assign_result.gt_inds == i.item(), as_tuple=False) - if inds.numel() != 0: - inds = inds.squeeze(1) - else: - continue - if len(inds) > num_per_gt: - inds = self.random_choice(inds, num_per_gt) - sampled_inds.append(inds) - sampled_inds = torch.cat(sampled_inds) - if len(sampled_inds) < num_expected: - num_extra = num_expected - len(sampled_inds) - extra_inds = np.array( - list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) - if len(extra_inds) > num_extra: - extra_inds = self.random_choice(extra_inds, num_extra) - extra_inds = torch.from_numpy(extra_inds).to( - assign_result.gt_inds.device).long() - sampled_inds = torch.cat([sampled_inds, extra_inds]) - elif len(sampled_inds) > num_expected: - sampled_inds = self.random_choice(sampled_inds, num_expected) - return sampled_inds diff --git a/spaces/abidlabs/gradio-discord-bot-server/README.md b/spaces/abidlabs/gradio-discord-bot-server/README.md deleted file mode 100644 index 03dcd1231c14783b1ad5a03d0df8bad6553578d3..0000000000000000000000000000000000000000 --- a/spaces/abidlabs/gradio-discord-bot-server/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gradio Discord Bot Server -emoji: 📚 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/xaudio2/lib_xaudio2.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/xaudio2/lib_xaudio2.py deleted file mode 100644 index 3ee7f30b5897c90b31760a8dad48b727b1c381e8..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/drivers/xaudio2/lib_xaudio2.py +++ /dev/null @@ -1,678 +0,0 @@ -import ctypes -import platform -import os -from pyglet.libs.win32.constants import * -from pyglet.libs.win32.types import * -from pyglet.libs.win32 import com -from pyglet.util import debug_print - -_debug = debug_print('debug_media') - - -def load_xaudio2(dll_name): - """This will attempt to load a version of XAudio2. Versions supported: 2.9, 2.8. - While Windows 8 ships with 2.8 and Windows 10 ships with version 2.9, it is possible to install 2.9 on 8/8.1. - """ - xaudio2 = dll_name - # System32 and SysWOW64 folders are opposite perception in Windows x64. - # System32 = x64 dll's | SysWOW64 = x86 dlls - # By default ctypes only seems to look in system32 regardless of Python architecture, which has x64 dlls. - if platform.architecture()[0] == '32bit': - if platform.machine().endswith('64'): # Machine is 64 bit, Python is 32 bit. - xaudio2 = os.path.join(os.environ['WINDIR'], 'SysWOW64', '{}.dll'.format(xaudio2)) - - xaudio2_lib = ctypes.windll.LoadLibrary(xaudio2) - - # Somehow x3d uses different calling structure than the rest of the DLL; Only affects 32 bit? Microsoft... 
- x3d_lib = ctypes.cdll.LoadLibrary(xaudio2) - return xaudio2_lib, x3d_lib - - -try: - xaudio2_lib, x3d_lib = load_xaudio2("xaudio2_9") -except OSError: - _debug("Could not load XAudio2.9 library") - try: - xaudio2_lib, x3d_lib = load_xaudio2("xaudio2_8") - except OSError: - _debug("Could not load XAudio2.8 library") - raise ImportError('Could not locate a supported XAudio2 library.') - - -UINT32 = c_uint32 -FLOAT32 = c_float - - -class XAUDIO2_DEBUG_CONFIGURATION(ctypes.Structure): - _fields_ = [ - ('TraceMask', UINT32), - ('BreakMask', UINT32), - ('LogThreadID', BOOL), - ('LogFileline', BOOL), - ('LogFunctionName', BOOL), - ('LogTiming', BOOL), - ] - - -class XAUDIO2_PERFORMANCE_DATA(ctypes.Structure): - _fields_ = [ - ('AudioCyclesSinceLastQuery', c_uint64), - ('TotalCyclesSinceLastQuery', c_uint64), - ('MinimumCyclesPerQuantum', UINT32), - ('MaximumCyclesPerQuantum', UINT32), - ('MemoryUsageInBytes', UINT32), - ('CurrentLatencyInSamples', UINT32), - ('GlitchesSinceEngineStarted', UINT32), - ('ActiveSourceVoiceCount', UINT32), - ('TotalSourceVoiceCount', UINT32), - ('ActiveSubmixVoiceCount', UINT32), - ('ActiveResamplerCount', UINT32), - ('ActiveMatrixMixCount', UINT32), - ('ActiveXmaSourceVoices', UINT32), - ('ActiveXmaStreams', UINT32), - ] - - def __repr__(self): - return "XAUDIO2PerformanceData(active_voices={}, total_voices={}, glitches={}, latency={} samples, memory_usage={} bytes)".format(self.ActiveSourceVoiceCount, self.TotalSourceVoiceCount, self.GlitchesSinceEngineStarted, self.CurrentLatencyInSamples, self.MemoryUsageInBytes) - - -class XAUDIO2_VOICE_SENDS(ctypes.Structure): - _fields_ = [ - ('SendCount', UINT32), - ('pSends', c_void_p), - ] - - -class XAUDIO2_BUFFER(ctypes.Structure): - _fields_ = [ - ('Flags', UINT32), - ('AudioBytes', UINT32), - ('pAudioData', POINTER(c_char)), - ('PlayBegin', UINT32), - ('PlayLength', UINT32), - ('LoopBegin', UINT32), - ('LoopLength', UINT32), - ('LoopCount', UINT32), - ('pContext', c_void_p), - ] - -class XAUDIO2_VOICE_STATE(ctypes.Structure): - _fields_ = [ - ('pCurrentBufferContext', c_void_p), - ('BuffersQueued', UINT32), - ('SamplesPlayed', UINT32) - ] - - def __repr__(self): - return "XAUDIO2_VOICE_STATE(BuffersQueued={0}, SamplesPlayed={1})".format(self.BuffersQueued, self.SamplesPlayed) - -class WAVEFORMATEX(ctypes.Structure): - _fields_ = [ - ('wFormatTag', WORD), - ('nChannels', WORD), - ('nSamplesPerSec', DWORD), - ('nAvgBytesPerSec', DWORD), - ('nBlockAlign', WORD), - ('wBitsPerSample', WORD), - ('cbSize', WORD), - ] - - def __repr__(self): - return 'WAVEFORMATEX(wFormatTag={}, nChannels={}, nSamplesPerSec={}, nAvgBytesPersec={}' \ - ', nBlockAlign={}, wBitsPerSample={}, cbSize={})'.format( - self.wFormatTag, self.nChannels, self.nSamplesPerSec, - self.nAvgBytesPerSec, self.nBlockAlign, self.wBitsPerSample, - self.cbSize) - -XAUDIO2_USE_DEFAULT_PROCESSOR = 0x00000000 # Win 10+ - -if WINDOWS_10_ANNIVERSARY_UPDATE_OR_GREATER: - XAUDIO2_DEFAULT_PROCESSOR = XAUDIO2_USE_DEFAULT_PROCESSOR -else: - XAUDIO2_DEFAULT_PROCESSOR = 0x00000001 # Windows 8/8.1 - - -XAUDIO2_LOG_ERRORS = 0x0001 # For handled errors with serious effects. -XAUDIO2_LOG_WARNINGS = 0x0002 # For handled errors that may be recoverable. -XAUDIO2_LOG_INFO = 0x0004 # Informational chit-chat (e.g. state changes). -XAUDIO2_LOG_DETAIL = 0x0008 # More detailed chit-chat. -XAUDIO2_LOG_API_CALLS = 0x0010 # Public API function entries and exits. -XAUDIO2_LOG_FUNC_CALLS = 0x0020 # Internal function entries and exits. 
-XAUDIO2_LOG_TIMING = 0x0040 # Delays detected and other timing data. -XAUDIO2_LOG_LOCKS = 0x0080 # Usage of critical sections and mutexes. -XAUDIO2_LOG_MEMORY = 0x0100 # Memory heap usage information. -XAUDIO2_LOG_STREAMING = 0x1000 # Audio streaming information. - - -# Some XAUDIO2 global settings, most not used, but useful information -XAUDIO2_MAX_BUFFER_BYTES = 0x80000000 # Maximum bytes allowed in a source buffer -XAUDIO2_MAX_QUEUED_BUFFERS = 64 # Maximum buffers allowed in a voice queue -XAUDIO2_MAX_BUFFERS_SYSTEM = 2 # Maximum buffers allowed for system threads (Xbox 360 only) -XAUDIO2_MAX_AUDIO_CHANNELS = 64 # Maximum channels in an audio stream -XAUDIO2_MIN_SAMPLE_RATE = 1000 # Minimum audio sample rate supported -XAUDIO2_MAX_SAMPLE_RATE = 200000 # Maximum audio sample rate supported -XAUDIO2_MAX_VOLUME_LEVEL = 16777216.0 # Maximum acceptable volume level (2^24) -XAUDIO2_MIN_FREQ_RATIO = (1/1024.0) # Minimum SetFrequencyRatio argument -XAUDIO2_MAX_FREQ_RATIO = 1024.0 # Maximum MaxFrequencyRatio argument -XAUDIO2_DEFAULT_FREQ_RATIO = 2.0 # Default MaxFrequencyRatio argument -XAUDIO2_MAX_FILTER_ONEOVERQ = 1.5 # Maximum XAUDIO2_FILTER_PARAMETERS.OneOverQ -XAUDIO2_MAX_FILTER_FREQUENCY = 1.0 # Maximum XAUDIO2_FILTER_PARAMETERS.Frequency -XAUDIO2_MAX_LOOP_COUNT = 254 # Maximum non-infinite XAUDIO2_BUFFER.LoopCount -XAUDIO2_MAX_INSTANCES = 8 # Maximum simultaneous XAudio2 objects on Xbox 360 - - -XAUDIO2_FILTER_TYPE = UINT -LowPassFilter = 0 # Attenuates frequencies above the cutoff frequency (state-variable filter). -BandPassFilter = 1 # Attenuates frequencies outside a given range (state-variable filter). -HighPassFilter = 2 # Attenuates frequencies below the cutoff frequency (state-variable filter). -NotchFilter = 3 # Attenuates frequencies inside a given range (state-variable filter). 
-LowPassOnePoleFilter = 4 # Attenuates frequencies above the cutoff frequency (one-pole filter, XAUDIO2_FILTER_PARAMETERS.OneOverQ has no effect) -HighPassOnePoleFilter = 5 # Attenuates frequencies below the cutoff frequency (one-pole filter, XAUDIO2_FILTER_PARAMETERS.OneOverQ has no effect) - -XAUDIO2_NO_LOOP_REGION = 0 # Used in XAUDIO2_BUFFER.LoopCount -XAUDIO2_LOOP_INFINITE = 255 # Used in XAUDIO2_BUFFER.LoopCount -XAUDIO2_DEFAULT_CHANNELS = 0 # Used in CreateMasteringVoice -XAUDIO2_DEFAULT_SAMPLERATE = 0 # Used in CreateMasteringVoice - -WAVE_FORMAT_PCM = 1 - -XAUDIO2_DEBUG_ENGINE = 0x0001 # Used in XAudio2Create -XAUDIO2_VOICE_NOPITCH = 0x0002 # Used in IXAudio2::CreateSourceVoice -XAUDIO2_VOICE_NOSRC = 0x0004 # Used in IXAudio2::CreateSourceVoice -XAUDIO2_VOICE_USEFILTER = 0x0008 # Used in IXAudio2::CreateSource/SubmixVoice -XAUDIO2_PLAY_TAILS = 0x0020 # Used in IXAudio2SourceVoice::Stop -XAUDIO2_END_OF_STREAM = 0x0040 # Used in XAUDIO2_BUFFER.Flags -XAUDIO2_SEND_USEFILTER = 0x0080 # Used in XAUDIO2_SEND_DESCRIPTOR.Flags -XAUDIO2_VOICE_NOSAMPLESPLAYED = 0x0100 # Used in IXAudio2SourceVoice::GetState -XAUDIO2_STOP_ENGINE_WHEN_IDLE = 0x2000 # Used in XAudio2Create to force the engine to Stop when no source voices are Started, and Start when a voice is Started -XAUDIO2_1024_QUANTUM = 0x8000 # Used in XAudio2Create to specify nondefault processing quantum of 21.33 ms (1024 samples at 48KHz) -XAUDIO2_NO_VIRTUAL_AUDIO_CLIENT = 0x10000 # Used in CreateMasteringVoice to create a virtual audio client - - -class IXAudio2VoiceCallback(com.Interface): - _methods_ = [ - ('OnVoiceProcessingPassStart', - com.STDMETHOD(UINT32)), - ('OnVoiceProcessingPassEnd', - com.STDMETHOD()), - ('onStreamEnd', - com.STDMETHOD()), - ('onBufferStart', - com.STDMETHOD(ctypes.c_void_p)), - ('OnBufferEnd', - com.STDMETHOD(ctypes.c_void_p)), - ('OnLoopEnd', - com.STDMETHOD(ctypes.c_void_p)), - ] - - -class XA2SourceCallback(com.COMObject): - """Callback class used to trigger when buffers or streams end.. - WARNING: Whenever a callback is running, XAudio2 cannot generate audio. - Make sure these functions run as fast as possible and do not block/delay more than a few milliseconds. - MS Recommendation: - At a minimum, callback functions must not do the following: - - Access the hard disk or other permanent storage - - Make expensive or blocking API calls - - Synchronize with other parts of client code - - Require significant CPU usage - """ - _interfaces_ = [IXAudio2VoiceCallback] - - def __init__(self, xa2_player): - self.xa2_player = xa2_player - - def OnVoiceProcessingPassStart(self, bytesRequired): - pass - - def OnVoiceProcessingPassEnd(self): - pass - - def onStreamEnd(self): - pass - - def onBufferStart(self, pBufferContext): - pass - - def OnBufferEnd(self, pBufferContext): - """At the end of playing one buffer, attempt to refill again. - Even if the player is out of sources, it needs to be called to purge all buffers. 
- """ - if self.xa2_player: - self.xa2_player.refill_source_player() - - def OnLoopEnd(self, this, pBufferContext): - pass - - def onVoiceError(self, this, pBufferContext, hresult): - raise Exception("Error occurred during audio playback.", hresult) - - -class XAUDIO2_EFFECT_DESCRIPTOR(Structure): - _fields_ = [ - ('pEffect', com.pIUnknown), - ('InitialState', c_bool), - ('OutputChannels', UINT32) - ] - - -class XAUDIO2_EFFECT_CHAIN(ctypes.Structure): - _fields_ = [ - ('EffectCount', UINT32), - ('pEffectDescriptors', POINTER(XAUDIO2_EFFECT_DESCRIPTOR)), - ] - - -class XAUDIO2_FILTER_PARAMETERS(Structure): - _fields_ = [ - ('Type', XAUDIO2_FILTER_TYPE), - ('Frequency', FLOAT), - ('OneOverQ', FLOAT) - ] - - -class XAUDIO2_VOICE_DETAILS(Structure): - _fields_ = [ - ('CreationFlags', UINT32), - ('ActiveFlags', UINT32), - ('InputChannels', UINT32), - ('InputSampleRate', UINT32) - ] - - -class IXAudio2Voice(com.pInterface): - _methods_ = [ - ('GetVoiceDetails', - com.STDMETHOD(POINTER(XAUDIO2_VOICE_DETAILS))), - ('SetOutputVoices', - com.STDMETHOD()), - ('SetEffectChain', - com.STDMETHOD(POINTER(XAUDIO2_EFFECT_CHAIN))), - ('EnableEffect', - com.STDMETHOD()), - ('DisableEffect', - com.STDMETHOD()), - ('GetEffectState', - com.STDMETHOD()), - ('SetEffectParameters', - com.STDMETHOD()), - ('GetEffectParameters', - com.STDMETHOD()), - ('SetFilterParameters', - com.STDMETHOD(POINTER(XAUDIO2_FILTER_PARAMETERS), UINT32)), - ('GetFilterParameters', - com.STDMETHOD()), - ('SetOutputFilterParameters', - com.STDMETHOD()), - ('GetOutputFilterParameters', - com.STDMETHOD()), - ('SetVolume', - com.STDMETHOD(ctypes.c_float, UINT32)), - ('GetVolume', - com.STDMETHOD(POINTER(c_float))), - ('SetChannelVolumes', - com.STDMETHOD()), - ('GetChannelVolumes', - com.STDMETHOD()), - ('SetOutputMatrix', - com.STDMETHOD(c_void_p, UINT32, UINT32, POINTER(FLOAT), UINT32)), - ('GetOutputMatrix', - com.STDMETHOD()), - ('DestroyVoice', - com.STDMETHOD()) - ] - - -class IXAudio2SubmixVoice(IXAudio2Voice): - pass - - -class IXAudio2SourceVoice(IXAudio2Voice): - _methods_ = [ - ('Start', - com.STDMETHOD(UINT32, UINT32)), - ('Stop', - com.STDMETHOD(UINT32, UINT32)), - ('SubmitSourceBuffer', - com.STDMETHOD(POINTER(XAUDIO2_BUFFER), c_void_p)), - ('FlushSourceBuffers', - com.STDMETHOD()), - ('Discontinuity', - com.STDMETHOD()), - ('ExitLoop', - com.STDMETHOD()), - ('GetState', - com.STDMETHOD(POINTER(XAUDIO2_VOICE_STATE), UINT32)), - ('SetFrequencyRatio', - com.STDMETHOD(FLOAT, UINT32)), - ('GetFrequencyRatio', - com.STDMETHOD(POINTER(c_float))), - ('SetSourceSampleRate', - com.STDMETHOD()), - ] - - -class IXAudio2MasteringVoice(IXAudio2Voice): - _methods_ = [ - ('GetChannelMask', - com.STDMETHOD(POINTER(DWORD))) - ] - - -class IXAudio2EngineCallback(com.Interface): - _methods_ = [ - ('OnProcessingPassStart', - com.METHOD(ctypes.c_void_p)), - ('OnProcessingPassEnd', - com.METHOD(ctypes.c_void_p)), - ('OnCriticalError', - com.METHOD(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_ulong)), - ] - - -class XA2EngineCallback(com.COMObject): - _interfaces_ = [IXAudio2EngineCallback] - - def OnProcessingPassStart(self): - pass - - def OnProcessingPassEnd(self): - pass - - def OnCriticalError(self, this, hresult): - raise Exception("Critical Error:", hresult) - - - -# -------------- 3D Audio Positioning---------- -class X3DAUDIO_DISTANCE_CURVE_POINT(ctypes.Structure): - _fields_ = [ - ('Distance', FLOAT32), - ('DSPSetting', FLOAT32) - ] - - -class X3DAUDIO_DISTANCE_CURVE(ctypes.Structure): - _fields_ = [ - ('pPoints', 
POINTER(X3DAUDIO_DISTANCE_CURVE_POINT)), - ('PointCount', UINT32) - ] - - -class X3DAUDIO_VECTOR(ctypes.Structure): - _fields_ = [ - ('x', c_float), - ('y', c_float), - ('z', c_float), - ] - - - -"""Cone: - Specifies directionality for a listener or single-channel emitter by - modifying DSP behaviour with respect to its front orientation. - This is modeled using two sound cones: an inner cone and an outer cone. - On/within the inner cone, DSP settings are scaled by the inner values. - On/beyond the outer cone, DSP settings are scaled by the outer values. - If on both the cones, DSP settings are scaled by the inner values only. - Between the two cones, the scaler is linearly interpolated between the - inner and outer values. Set both cone angles to 0 or X3DAUDIO_2PI for - omnidirectionality using only the outer or inner values respectively.""" -class X3DAUDIO_CONE(Structure): - _fields_ = [ - ('InnerAngle', FLOAT32), # inner cone angle in radians, must be within [0.0f, X3DAUDIO_2PI] - ('OuterAngle', FLOAT32), # outer cone angle in radians, must be within [InnerAngle, X3DAUDIO_2PI] - ('InnerVolume', FLOAT32), # volume level scaler on/within inner cone, used only for matrix calculations, must be within [0.0f, 2.0f] when used - ('OuterVolume', FLOAT32), # volume level scaler on/beyond outer cone, used only for matrix calculations, must be within [0.0f, 2.0f] when used - ('InnerLPF', FLOAT32), # LPF (both direct and reverb paths) coefficient subtrahend on/within inner cone, used only for LPF (both direct and reverb paths) calculations, must be within [0.0f, 1.0f] when used - ('OuterLPF', FLOAT32), # LPF (both direct and reverb paths) coefficient subtrahend on/beyond outer cone, used only for LPF (both direct and reverb paths) calculations, must be within [0.0f, 1.0f] when used - ('InnerReverb', FLOAT32), # reverb send level scaler on/within inner cone, used only for reverb calculations, must be within [0.0f, 2.0f] when used - ('OuterReverb', FLOAT32) # reverb send level scaler on/beyond outer cone, used only for reverb calculations, must be within [0.0f, 2.0f] when used - ] - - -class X3DAUDIO_LISTENER(Structure): - _fields_ = [ - ('OrientFront', X3DAUDIO_VECTOR), # orientation of front direction, used only for matrix and delay calculations or listeners with cones for matrix, LPF (both direct and reverb paths), and reverb calculations, must be normalized when used - ('OrientTop', X3DAUDIO_VECTOR), # orientation of top direction, used only for matrix and delay calculations, must be orthonormal with OrientFront when used - ('Position', X3DAUDIO_VECTOR), # position in user-defined world units, does not affect Velocity - ('Velocity', X3DAUDIO_VECTOR), # velocity vector in user-defined world units/second, used only for doppler calculations, does not affect Position - ('pCone', POINTER(X3DAUDIO_CONE)) # sound cone, used only for matrix, LPF (both direct and reverb paths), and reverb calculations, NULL specifies omnidirectionality - ] - - -class X3DAUDIO_EMITTER(Structure): - _fields_ = [ - ('pCone', POINTER(X3DAUDIO_CONE)), - ('OrientFront', X3DAUDIO_VECTOR), - ('OrientTop', X3DAUDIO_VECTOR), - ('Position', X3DAUDIO_VECTOR), - ('Velocity', X3DAUDIO_VECTOR), - ('InnerRadius', FLOAT32), - ('InnerRadiusAngle', FLOAT32), - ('ChannelCount', UINT32), - ('ChannelRadius', FLOAT32), - ('pChannelAzimuths', POINTER(FLOAT32)), - ('pVolumeCurve', POINTER(X3DAUDIO_DISTANCE_CURVE)), - ('pLFECurve', POINTER(X3DAUDIO_DISTANCE_CURVE)), - ('pLPFDirectCurve', POINTER(X3DAUDIO_DISTANCE_CURVE)), - ('pLPFReverbCurve', 
POINTER(X3DAUDIO_DISTANCE_CURVE)), - ('pReverbCurve', POINTER(X3DAUDIO_DISTANCE_CURVE)), - ('CurveDistanceScaler', FLOAT32), - ('DopplerScaler', FLOAT32) - ] - - -class X3DAUDIO_DSP_SETTINGS(Structure): - _fields_ = [ - ('pMatrixCoefficients', POINTER(FLOAT)), # float array - ('pDelayTimes', POINTER(FLOAT32)), - ('SrcChannelCount', UINT32), - ('DstChannelCount', UINT32), - ('LPFDirectCoefficient', FLOAT32), - ('LPFReverbCoefficient', FLOAT32), - ('ReverbLevel', FLOAT32), - ('DopplerFactor', FLOAT32), - ('EmitterToListenerAngle', FLOAT32), - ('EmitterToListenerDistance', FLOAT32), - ('EmitterVelocityComponent', FLOAT32), - ('ListenerVelocityComponent', FLOAT32) - ] - -# Other constants that may or may not be used in X3D. - -SPEAKER_FRONT_LEFT = 0x00000001 -SPEAKER_FRONT_RIGHT = 0x00000002 -SPEAKER_FRONT_CENTER = 0x00000004 -SPEAKER_LOW_FREQUENCY = 0x00000008 -SPEAKER_BACK_LEFT = 0x00000010 -SPEAKER_BACK_RIGHT = 0x00000020 -SPEAKER_FRONT_LEFT_OF_CENTER = 0x00000040 -SPEAKER_FRONT_RIGHT_OF_CENTER = 0x00000080 -SPEAKER_BACK_CENTER = 0x00000100 -SPEAKER_SIDE_LEFT = 0x00000200 -SPEAKER_SIDE_RIGHT = 0x00000400 -SPEAKER_TOP_CENTER = 0x00000800 -SPEAKER_TOP_FRONT_LEFT = 0x00001000 -SPEAKER_TOP_FRONT_CENTER = 0x00002000 -SPEAKER_TOP_FRONT_RIGHT = 0x00004000 -SPEAKER_TOP_BACK_LEFT = 0x00008000 -SPEAKER_TOP_BACK_CENTER = 0x00010000 -SPEAKER_TOP_BACK_RIGHT = 0x00020000 -SPEAKER_RESERVED = 0x7FFC0000 # bit mask locations reserved for future use -SPEAKER_ALL = 0x80000000 # used to specify that any possible permutation of speaker configurations - -SPEAKER_MONO = SPEAKER_FRONT_CENTER -SPEAKER_STEREO = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT) -SPEAKER_2POINT1 = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_LOW_FREQUENCY) -SPEAKER_SURROUND = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_BACK_CENTER) -SPEAKER_QUAD = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT) -SPEAKER_4POINT1 = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT) -SPEAKER_5POINT1 = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT) -SPEAKER_7POINT1 = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_FRONT_LEFT_OF_CENTER | SPEAKER_FRONT_RIGHT_OF_CENTER) -SPEAKER_5POINT1_SURROUND = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT) -SPEAKER_7POINT1_SURROUND = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT) - - -DBL_DECIMAL_DIG = 17 # # of decimal digits of rounding precision -DBL_DIG = 15 # # of decimal digits of precision -DBL_EPSILON = 2.2204460492503131e-016 # smallest such that 1.0+DBL_EPSILON != 1.0 -DBL_HAS_SUBNORM = 1 # type does support subnormal numbers -DBL_MANT_DIG = 53 # # of bits in mantissa -DBL_MAX = 1.7976931348623158e+308 # max value -DBL_MAX_10_EXP = 308 # max decimal exponent -DBL_MAX_EXP = 1024 # max binary exponent -DBL_MIN = 2.2250738585072014e-308 # min positive value -DBL_MIN_10_EXP = (-307) # min decimal exponent -DBL_MIN_EXP = (-1021) # min binary exponent -_DBL_RADIX = 2 # exponent radix -DBL_TRUE_MIN = 4.9406564584124654e-324 # min positive value - -FLT_DECIMAL_DIG = 9 # # of decimal digits of rounding precision -FLT_DIG 
= 6 # # of decimal digits of precision -FLT_EPSILON = 1.192092896e-07 # smallest such that 1.0+FLT_EPSILON != 1.0 -FLT_HAS_SUBNORM = 1 # type does support subnormal numbers -FLT_GUARD = 0 -FLT_MANT_DIG = 24 # # of bits in mantissa -FLT_MAX = 3.402823466e+38 # max value -FLT_MAX_10_EXP = 38 # max decimal exponent -FLT_MAX_EXP = 128 # max binary exponent -FLT_MIN = 1.175494351e-38 # min normalized positive value -FLT_MIN_10_EXP = (-37) # min decimal exponent -FLT_MIN_EXP = (-125) # min binary exponent -FLT_NORMALIZE = 0 -FLT_RADIX = 2 # exponent radix -FLT_TRUE_MIN = 1.401298464e-45 # min positive value - -LDBL_DIG = DBL_DIG # # of decimal digits of precision -LDBL_EPSILON = DBL_EPSILON # smallest such that 1.0+LDBL_EPSILON != 1.0 -LDBL_HAS_SUBNORM = DBL_HAS_SUBNORM # type does support subnormal numbers -LDBL_MANT_DIG = DBL_MANT_DIG # # of bits in mantissa -LDBL_MAX = DBL_MAX # max value -LDBL_MAX_10_EXP = DBL_MAX_10_EXP # max decimal exponent -LDBL_MAX_EXP = DBL_MAX_EXP # max binary exponent -LDBL_MIN = DBL_MIN # min normalized positive value -LDBL_MIN_10_EXP = DBL_MIN_10_EXP # min decimal exponent -LDBL_MIN_EXP = DBL_MIN_EXP # min binary exponent -_LDBL_RADIX = _DBL_RADIX # exponent radix -LDBL_TRUE_MIN = DBL_TRUE_MIN # min positive value - -DECIMAL_DIG = DBL_DECIMAL_DIG - - -X3DAUDIO_HANDLE_BYTESIZE = 20 -X3DAUDIO_HANDLE = (BYTE * X3DAUDIO_HANDLE_BYTESIZE) - - -# speed of sound in meters per second for dry air at approximately 20C, used with X3DAudioInitialize -X3DAUDIO_SPEED_OF_SOUND = 343.5 - - -X3DAUDIO_CALCULATE_MATRIX = 0x00000001 # enable matrix coefficient table calculation -X3DAUDIO_CALCULATE_DELAY = 0x00000002 # enable delay time array calculation (stereo final mix only) -X3DAUDIO_CALCULATE_LPF_DIRECT = 0x00000004 # enable LPF direct-path coefficient calculation -X3DAUDIO_CALCULATE_LPF_REVERB = 0x00000008 # enable LPF reverb-path coefficient calculation -X3DAUDIO_CALCULATE_REVERB = 0x00000010 # enable reverb send level calculation -X3DAUDIO_CALCULATE_DOPPLER = 0x00000020 # enable doppler shift factor calculation -X3DAUDIO_CALCULATE_EMITTER_ANGLE = 0x00000040 # enable emitter-to-listener interior angle calculation -X3DAUDIO_CALCULATE_ZEROCENTER = 0x00010000 # do not position to front center speaker, signal positioned to remaining speakers instead, front center destination channel will be zero in returned matrix coefficient table, valid only for matrix calculations with final mix formats that have a front center channel -X3DAUDIO_CALCULATE_REDIRECT_TO_LFE = 0x00020000 # apply equal mix of all source channels to LFE destination channel, valid only for matrix calculations with sources that have no LFE channel and final mix formats that have an LFE channel - -default_dsp_calculation = X3DAUDIO_CALCULATE_MATRIX | X3DAUDIO_CALCULATE_DOPPLER - -X3DAudioInitialize = x3d_lib.X3DAudioInitialize -X3DAudioInitialize.restype = HRESULT -X3DAudioInitialize.argtypes = [c_int, c_float, c_void_p] - - -X3DAudioCalculate = x3d_lib.X3DAudioCalculate -X3DAudioCalculate.restype = c_void -X3DAudioCalculate.argtypes = [POINTER(X3DAUDIO_HANDLE), POINTER(X3DAUDIO_LISTENER), POINTER(X3DAUDIO_EMITTER), UINT32, POINTER(X3DAUDIO_DSP_SETTINGS)] - - -AudioCategory_Other = 0 -AudioCategory_ForegroundOnlyMedia = 1 -AudioCategory_Communications = 3 -AudioCategory_Alerts = 4 -AudioCategory_SoundEffects = 5 -AudioCategory_GameEffects = 6 -AudioCategory_GameMedia = 7 -AudioCategory_GameChat = 8 -AudioCategory_Speech = 9 -AudioCategory_Movie = 10 -AudioCategory_Media = 11 - -# Reverb not implemented but if someone wants 
to take a stab at it. -class XAUDIO2FX_REVERB_PARAMETERS(Structure): - _fields_ = [ - ('WetDryMix', c_float), # ratio of wet (processed) signal to dry (original) signal - - # Delay times - ('ReflectionsDelay', UINT32), # [0, 300] in ms - ('ReverbDelay', BYTE), # [0, 85] in ms - ('RearDelay', UINT32), # 7.1: [0, 20] in ms, all other: [0, 5] in ms - ('SideDelay', UINT32), # .1: [0, 5] in ms, all other: not used, but still validated # WIN 10 only. - - # Indexed Paremeters - ('PositionLeft', BYTE), # [0, 30] no units - ('PositionRight', BYTE), # 0, 30] no units, ignored when configured to mono - ('PositionMatrixLeft', BYTE), # [0, 30] no units - ('PositionMatrixRight', BYTE), # [0, 30] no units, ignored when configured to mono - ('EarlyDiffusion', BYTE), # [0, 15] no units - ('LateDiffusion', BYTE), # [0, 15] no units - ('LowEQGain', BYTE), # [0, 12] no units - ('LowEQCutoff', BYTE), # [0, 9] no units - ('LowEQCutoff', BYTE), # [0, 8] no units - ('HighEQCutoff', BYTE), # [0, 14] no units - - # Direct parameters - ('RoomFilterFreq', c_float), # [20, 20000] in Hz - ('RoomFilterMain', c_float), # [-100, 0] in dB - ('RoomFilterHF', c_float), # [-100, 0] in dB - ('ReflectionsGain', c_float), # [-100, 20] in dB - ('ReverbGain', c_float), # [-100, 20] in dB - ('DecayTime', c_float), # [0.1, inf] in seconds - ('Density', c_float), # [0, 100] (percentage) - ('RoomSize', c_float), # [1, 100] in feet - - # component control - ('DisableLateField', c_bool), # TRUE to disable late field reflections - ] - - -class IXAudio2(com.pIUnknown): - _methods_ = [ - ('RegisterForCallbacks', - com.STDMETHOD(POINTER(IXAudio2EngineCallback))), - ('UnregisterForCallbacks', - com.METHOD(ctypes.c_void_p, POINTER(IXAudio2EngineCallback))), - ('CreateSourceVoice', - com.STDMETHOD(POINTER(IXAudio2SourceVoice), POINTER(WAVEFORMATEX), UINT32, c_float, - POINTER(IXAudio2VoiceCallback), POINTER(XAUDIO2_VOICE_SENDS), POINTER(XAUDIO2_EFFECT_CHAIN))), - ('CreateSubmixVoice', - com.STDMETHOD(POINTER(IXAudio2SubmixVoice), UINT32, UINT32, UINT32, UINT32, - POINTER(XAUDIO2_VOICE_SENDS), POINTER(XAUDIO2_EFFECT_CHAIN))), - ('CreateMasteringVoice', - com.STDMETHOD(POINTER(IXAudio2MasteringVoice), UINT32, UINT32, UINT32, LPCWSTR, POINTER(XAUDIO2_EFFECT_CHAIN), - UINT32)), - ('StartEngine', - com.STDMETHOD()), - ('StopEngine', - com.STDMETHOD()), - ('CommitChanges', - com.STDMETHOD(UINT32)), - ('GetPerformanceData', - com.METHOD(c_void, POINTER(XAUDIO2_PERFORMANCE_DATA))), - ('SetDebugConfiguration', - com.STDMETHOD(POINTER(XAUDIO2_DEBUG_CONFIGURATION), c_void_p)), - ] - - -XAudio2Create = xaudio2_lib.XAudio2Create -XAudio2Create.restype = HRESULT -XAudio2Create.argtypes = [POINTER(IXAudio2), UINT32, UINT32] - -CreateAudioReverb = xaudio2_lib.CreateAudioReverb -CreateAudioReverb.restype = HRESULT -CreateAudioReverb.argtypes = [POINTER(com.pIUnknown)] - diff --git a/spaces/acmyu/frame_interpolation_prototype/spectral.py b/spaces/acmyu/frame_interpolation_prototype/spectral.py deleted file mode 100644 index 39b2c037d1f68e3b4e7ad0b282139f73712ffaca..0000000000000000000000000000000000000000 --- a/spaces/acmyu/frame_interpolation_prototype/spectral.py +++ /dev/null @@ -1,68 +0,0 @@ -import torch -from torch.optim.optimizer import Optimizer, required - -from torch.autograd import Variable -import torch.nn.functional as F -from torch import nn -from torch import Tensor -from torch.nn import Parameter - -def l2normalize(v, eps=1e-12): - return v / (v.norm() + eps) - - -class SpectralNorm(nn.Module): - def __init__(self, module, name='weight', 
power_iterations=1): - super(SpectralNorm, self).__init__() - self.module = module - self.name = name - self.power_iterations = power_iterations - if not self._made_params(): - self._make_params() - - def _update_u_v(self): - u = getattr(self.module, self.name + "_u") - v = getattr(self.module, self.name + "_v") - w = getattr(self.module, self.name + "_bar") - - height = w.data.shape[0] - for _ in range(self.power_iterations): - v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data)) - u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data)) - - # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data)) - sigma = u.dot(w.view(height, -1).mv(v)) - setattr(self.module, self.name, w / sigma.expand_as(w)) - - def _made_params(self): - try: - u = getattr(self.module, self.name + "_u") - v = getattr(self.module, self.name + "_v") - w = getattr(self.module, self.name + "_bar") - return True - except AttributeError: - return False - - - def _make_params(self): - w = getattr(self.module, self.name) - - height = w.data.shape[0] - width = w.view(height, -1).data.shape[1] - - u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) - v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) - u.data = l2normalize(u.data) - v.data = l2normalize(v.data) - w_bar = Parameter(w.data) - - del self.module._parameters[self.name] - - self.module.register_parameter(self.name + "_u", u) - self.module.register_parameter(self.name + "_v", v) - self.module.register_parameter(self.name + "_bar", w_bar) - - - def forward(self, *args): - self._update_u_v() - return self.module.forward(*args) \ No newline at end of file diff --git a/spaces/aijack/jojo/e4e/datasets/__init__.py b/spaces/aijack/jojo/e4e/datasets/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/akhaliq/CaptchaCracker/README.md b/spaces/akhaliq/CaptchaCracker/README.md deleted file mode 100644 index 4534dde01c80e4c8a2cb66d272a51b81f908ca50..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/CaptchaCracker/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: CaptchaCracker -emoji: 🏢 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 2.9b24 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/akhaliq/GPEN/face_detect/layers/modules/multibox_loss.py b/spaces/akhaliq/GPEN/face_detect/layers/modules/multibox_loss.py deleted file mode 100644 index 096620480eba59e9d893c1940899f7e3d6736cae..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/GPEN/face_detect/layers/modules/multibox_loss.py +++ /dev/null @@ -1,125 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable -from utils.box_utils import match, log_sum_exp -from data import cfg_mnet -GPU = cfg_mnet['gpu_train'] - -class MultiBoxLoss(nn.Module): - """SSD Weighted Loss Function - Compute Targets: - 1) Produce Confidence Target Indices by matching ground truth boxes - with (default) 'priorboxes' that have jaccard index > threshold parameter - (default threshold: 0.5). - 2) Produce localization target by 'encoding' variance into offsets of ground - truth boxes and their matched 'priorboxes'. - 3) Hard negative mining to filter the excessive number of negative examples - that comes with using a large number of default bounding boxes. 
- (default negative:positive ratio 3:1) - Objective Loss: - L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N - Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss - weighted by α which is set to 1 by cross val. - Args: - c: class confidences, - l: predicted boxes, - g: ground truth boxes - N: number of matched default boxes - See: https://arxiv.org/pdf/1512.02325.pdf for more details. - """ - - def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target): - super(MultiBoxLoss, self).__init__() - self.num_classes = num_classes - self.threshold = overlap_thresh - self.background_label = bkg_label - self.encode_target = encode_target - self.use_prior_for_matching = prior_for_matching - self.do_neg_mining = neg_mining - self.negpos_ratio = neg_pos - self.neg_overlap = neg_overlap - self.variance = [0.1, 0.2] - - def forward(self, predictions, priors, targets): - """Multibox Loss - Args: - predictions (tuple): A tuple containing loc preds, conf preds, - and prior boxes from SSD net. - conf shape: torch.size(batch_size,num_priors,num_classes) - loc shape: torch.size(batch_size,num_priors,4) - priors shape: torch.size(num_priors,4) - - ground_truth (tensor): Ground truth boxes and labels for a batch, - shape: [batch_size,num_objs,5] (last idx is the label). - """ - - loc_data, conf_data, landm_data = predictions - priors = priors - num = loc_data.size(0) - num_priors = (priors.size(0)) - - # match priors (default boxes) and ground truth boxes - loc_t = torch.Tensor(num, num_priors, 4) - landm_t = torch.Tensor(num, num_priors, 10) - conf_t = torch.LongTensor(num, num_priors) - for idx in range(num): - truths = targets[idx][:, :4].data - labels = targets[idx][:, -1].data - landms = targets[idx][:, 4:14].data - defaults = priors.data - match(self.threshold, truths, defaults, self.variance, labels, landms, loc_t, conf_t, landm_t, idx) - if GPU: - loc_t = loc_t.cuda() - conf_t = conf_t.cuda() - landm_t = landm_t.cuda() - - zeros = torch.tensor(0).cuda() - # landm Loss (Smooth L1) - # Shape: [batch,num_priors,10] - pos1 = conf_t > zeros - num_pos_landm = pos1.long().sum(1, keepdim=True) - N1 = max(num_pos_landm.data.sum().float(), 1) - pos_idx1 = pos1.unsqueeze(pos1.dim()).expand_as(landm_data) - landm_p = landm_data[pos_idx1].view(-1, 10) - landm_t = landm_t[pos_idx1].view(-1, 10) - loss_landm = F.smooth_l1_loss(landm_p, landm_t, reduction='sum') - - - pos = conf_t != zeros - conf_t[pos] = 1 - - # Localization Loss (Smooth L1) - # Shape: [batch,num_priors,4] - pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data) - loc_p = loc_data[pos_idx].view(-1, 4) - loc_t = loc_t[pos_idx].view(-1, 4) - loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum') - - # Compute max conf across batch for hard negative mining - batch_conf = conf_data.view(-1, self.num_classes) - loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1)) - - # Hard Negative Mining - loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now - loss_c = loss_c.view(num, -1) - _, loss_idx = loss_c.sort(1, descending=True) - _, idx_rank = loss_idx.sort(1) - num_pos = pos.long().sum(1, keepdim=True) - num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1) - neg = idx_rank < num_neg.expand_as(idx_rank) - - # Confidence Loss Including Positive and Negative Examples - pos_idx = pos.unsqueeze(2).expand_as(conf_data) - neg_idx = neg.unsqueeze(2).expand_as(conf_data) - conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1,self.num_classes) - 
targets_weighted = conf_t[(pos+neg).gt(0)] - loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum') - - # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N - N = max(num_pos.data.sum().float(), 1) - loss_l /= N - loss_c /= N - loss_landm /= N1 - - return loss_l, loss_c, loss_landm diff --git a/spaces/akhaliq/JoJoGAN/e4e/datasets/inference_dataset.py b/spaces/akhaliq/JoJoGAN/e4e/datasets/inference_dataset.py deleted file mode 100644 index fb577d7b538d634f27013c2784d2ea32143154cb..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/JoJoGAN/e4e/datasets/inference_dataset.py +++ /dev/null @@ -1,25 +0,0 @@ -from torch.utils.data import Dataset -from PIL import Image -from utils import data_utils - - -class InferenceDataset(Dataset): - - def __init__(self, root, opts, transform=None, preprocess=None): - self.paths = sorted(data_utils.make_dataset(root)) - self.transform = transform - self.preprocess = preprocess - self.opts = opts - - def __len__(self): - return len(self.paths) - - def __getitem__(self, index): - from_path = self.paths[index] - if self.preprocess is not None: - from_im = self.preprocess(from_path) - else: - from_im = Image.open(from_path).convert('RGB') - if self.transform: - from_im = self.transform(from_im) - return from_im diff --git a/spaces/akhaliq/JoJoGAN/e4e/scripts/train.py b/spaces/akhaliq/JoJoGAN/e4e/scripts/train.py deleted file mode 100644 index d885cfde49a0b21140e663e475918698d5e51ee3..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/JoJoGAN/e4e/scripts/train.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -This file runs the main training/val loop -""" -import os -import json -import math -import sys -import pprint -import torch -from argparse import Namespace - -sys.path.append(".") -sys.path.append("..") - -from options.train_options import TrainOptions -from training.coach import Coach - - -def main(): - opts = TrainOptions().parse() - previous_train_ckpt = None - if opts.resume_training_from_ckpt: - opts, previous_train_ckpt = load_train_checkpoint(opts) - else: - setup_progressive_steps(opts) - create_initial_experiment_dir(opts) - - coach = Coach(opts, previous_train_ckpt) - coach.train() - - -def load_train_checkpoint(opts): - train_ckpt_path = opts.resume_training_from_ckpt - previous_train_ckpt = torch.load(opts.resume_training_from_ckpt, map_location='cpu') - new_opts_dict = vars(opts) - opts = previous_train_ckpt['opts'] - opts['resume_training_from_ckpt'] = train_ckpt_path - update_new_configs(opts, new_opts_dict) - pprint.pprint(opts) - opts = Namespace(**opts) - if opts.sub_exp_dir is not None: - sub_exp_dir = opts.sub_exp_dir - opts.exp_dir = os.path.join(opts.exp_dir, sub_exp_dir) - create_initial_experiment_dir(opts) - return opts, previous_train_ckpt - - -def setup_progressive_steps(opts): - log_size = int(math.log(opts.stylegan_size, 2)) - num_style_layers = 2*log_size - 2 - num_deltas = num_style_layers - 1 - if opts.progressive_start is not None: # If progressive delta training - opts.progressive_steps = [0] - next_progressive_step = opts.progressive_start - for i in range(num_deltas): - opts.progressive_steps.append(next_progressive_step) - next_progressive_step += opts.progressive_step_every - - assert opts.progressive_steps is None or is_valid_progressive_steps(opts, num_style_layers), \ - "Invalid progressive training input" - - -def is_valid_progressive_steps(opts, num_style_layers): - return len(opts.progressive_steps) == num_style_layers and opts.progressive_steps[0] == 0 - - -def 
create_initial_experiment_dir(opts): - if os.path.exists(opts.exp_dir): - raise Exception('Oops... {} already exists'.format(opts.exp_dir)) - os.makedirs(opts.exp_dir) - - opts_dict = vars(opts) - pprint.pprint(opts_dict) - with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f: - json.dump(opts_dict, f, indent=4, sort_keys=True) - - -def update_new_configs(ckpt_opts, new_opts): - for k, v in new_opts.items(): - if k not in ckpt_opts: - ckpt_opts[k] = v - if new_opts['update_param_list']: - for param in new_opts['update_param_list']: - ckpt_opts[param] = new_opts[param] - - -if __name__ == '__main__': - main() diff --git a/spaces/akhaliq/stylegan3_clip/torch_utils/ops/conv2d_gradfix.py b/spaces/akhaliq/stylegan3_clip/torch_utils/ops/conv2d_gradfix.py deleted file mode 100644 index 8056b5dbac9117d64aadb23bf7a3a36388a6bb99..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/stylegan3_clip/torch_utils/ops/conv2d_gradfix.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom replacement for `torch.nn.functional.conv2d` that supports -arbitrarily high order gradients with zero performance penalty.""" - -import contextlib -import torch - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access - -#---------------------------------------------------------------------------- - -enabled = False # Enable the custom op by setting this to true. -weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights. 
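# The two module-level flags above are the public switches: `enabled` opts into the
# custom autograd op (only honoured for CUDA inputs with cuDNN available, see
# _should_use_custom_op below), and `weight_gradients_disabled` is flipped by the
# no_weight_gradients() context manager so that Conv2d.backward skips the weight
# gradient. Minimal usage sketch (illustrative only; assumes the package layout
# implied by this file's path, torch_utils/ops/conv2d_gradfix.py):
#
#   import torch
#   from torch_utils.ops import conv2d_gradfix
#
#   conv2d_gradfix.enabled = True
#   x = torch.randn(1, 3, 8, 8, device='cuda', requires_grad=True)
#   w = torch.randn(4, 3, 3, 3, device='cuda', requires_grad=True)
#   y = conv2d_gradfix.conv2d(x, w, padding=1)        # custom op, double-backward capable
#   with conv2d_gradfix.no_weight_gradients():
#       y = conv2d_gradfix.conv2d(x, w, padding=1)    # weight gradient suppressed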
- -@contextlib.contextmanager -def no_weight_gradients(disable=True): - global weight_gradients_disabled - old = weight_gradients_disabled - if disable: - weight_gradients_disabled = True - yield - weight_gradients_disabled = old - -#---------------------------------------------------------------------------- - -def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias) - return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) - -def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias) - return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation) - -#---------------------------------------------------------------------------- - -def _should_use_custom_op(input): - assert isinstance(input, torch.Tensor) - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - if input.device.type != 'cuda': - return False - return True - -def _tuple_of_ints(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - assert len(xs) == ndim - assert all(isinstance(x, int) for x in xs) - return xs - -#---------------------------------------------------------------------------- - -_conv2d_gradfix_cache = dict() -_null_tensor = torch.empty([0]) - -def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups): - # Parse arguments. - ndim = 2 - weight_shape = tuple(weight_shape) - stride = _tuple_of_ints(stride, ndim) - padding = _tuple_of_ints(padding, ndim) - output_padding = _tuple_of_ints(output_padding, ndim) - dilation = _tuple_of_ints(dilation, ndim) - - # Lookup from cache. - key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) - if key in _conv2d_gradfix_cache: - return _conv2d_gradfix_cache[key] - - # Validate arguments. - assert groups >= 1 - assert len(weight_shape) == ndim + 2 - assert all(stride[i] >= 1 for i in range(ndim)) - assert all(padding[i] >= 0 for i in range(ndim)) - assert all(dilation[i] >= 0 for i in range(ndim)) - if not transpose: - assert all(output_padding[i] == 0 for i in range(ndim)) - else: # transpose - assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim)) - - # Helpers. - common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups) - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - # Forward & backward. 
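A concrete check of the two helpers above: `_tuple_of_ints` normalises scalar arguments to per-dimension tuples (so the cache key is stable), and `calc_output_padding` recovers the `output_padding` the transposed convolution in the backward pass needs. (Standalone copy for illustration; the `_sketch` names are not part of the module.)

def tuple_of_ints_sketch(xs, ndim):
    xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
    assert len(xs) == ndim and all(isinstance(x, int) for x in xs)
    return xs

assert tuple_of_ints_sketch(1, 2) == (1, 1)
assert tuple_of_ints_sketch([2, 3], 2) == (2, 3)

# A stride-2, padding-1, 3x3 conv maps an 8x8 input to a 4x4 output; plugging
# those shapes into the formula above yields output_padding = 1 per dimension,
# which is exactly what conv_transpose2d needs to reproduce the original 8x8 size.
stride, padding, dilation, k = (2, 2), (1, 1), (1, 1), (3, 3)
in_hw, out_hw = (8, 8), (4, 4)
p = [in_hw[i] - (out_hw[i] - 1) * stride[i] - (1 - 2 * padding[i]) - dilation[i] * (k[i] - 1)
     for i in range(2)]
assert p == [1, 1]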
- class Conv2d(torch.autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - assert weight.shape == weight_shape - ctx.save_for_backward( - input if weight.requires_grad else _null_tensor, - weight if input.requires_grad else _null_tensor, - ) - ctx.input_shape = input.shape - - # Simple 1x1 convolution => cuBLAS (only on Volta, not on Ampere). - if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0) and torch.cuda.get_device_capability(input.device) < (8, 0): - a = weight.reshape(groups, weight_shape[0] // groups, weight_shape[1]) - b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1) - c = (a.transpose(1, 2) if transpose else a) @ b.permute(1, 2, 0, 3).flatten(2) - c = c.reshape(-1, input.shape[0], *input.shape[2:]).transpose(0, 1) - c = c if bias is None else c + bias.unsqueeze(0).unsqueeze(2).unsqueeze(3) - return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) - - # General case => cuDNN. - if transpose: - return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs) - return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - input_shape = ctx.input_shape - grad_input = None - grad_weight = None - grad_bias = None - - if ctx.needs_input_grad[0]: - p = calc_output_padding(input_shape=input_shape, output_shape=grad_output.shape) - op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) - grad_input = op.apply(grad_output, weight, None) - assert grad_input.shape == input_shape - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - assert grad_weight.shape == weight_shape - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum([0, 2, 3]) - - return grad_input, grad_weight, grad_bias - - # Gradient with respect to the weights. - class Conv2dGradWeight(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - ctx.save_for_backward( - grad_output if input.requires_grad else _null_tensor, - input if grad_output.requires_grad else _null_tensor, - ) - ctx.grad_output_shape = grad_output.shape - ctx.input_shape = input.shape - - # Simple 1x1 convolution => cuBLAS (on both Volta and Ampere). - if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0): - a = grad_output.reshape(grad_output.shape[0], groups, grad_output.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) - b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) - c = (b @ a.transpose(1, 2) if transpose else a @ b.transpose(1, 2)).reshape(weight_shape) - return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) - - # General case => cuDNN. 
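The 1x1 fast paths above (in both `Conv2d.forward` and `Conv2dGradWeight.forward`) rest on the fact that a 1x1 convolution with unit stride, unit dilation and zero padding is just a per-pixel matrix multiply over channels, which cuBLAS handles directly. A quick numerical check of that equivalence for the `groups=1` case (illustrative only):

import torch
import torch.nn.functional as F

n, cin, cout, h, w = 2, 3, 5, 4, 4
x = torch.randn(n, cin, h, w)
weight = torch.randn(cout, cin, 1, 1)

ref = F.conv2d(x, weight)                      # (n, cout, h, w)
a = weight.reshape(cout, cin)                  # (cout, cin)
b = x.reshape(n, cin, h * w)                   # (n, cin, h*w)
mm = (a @ b).reshape(n, cout, h, w)            # broadcasted batched matmul

assert torch.allclose(ref, mm, atol=1e-5)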
- name = 'aten::cudnn_convolution_transpose_backward_weight' if transpose else 'aten::cudnn_convolution_backward_weight' - flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32] - return torch._C._jit_get_operation(name)(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags) - - @staticmethod - def backward(ctx, grad2_grad_weight): - grad_output, input = ctx.saved_tensors - grad_output_shape = ctx.grad_output_shape - input_shape = ctx.input_shape - grad2_grad_output = None - grad2_input = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None) - assert grad2_grad_output.shape == grad_output_shape - - if ctx.needs_input_grad[1]: - p = calc_output_padding(input_shape=input_shape, output_shape=grad_output_shape) - op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) - grad2_input = op.apply(grad_output, grad2_grad_weight, None) - assert grad2_input.shape == input_shape - - return grad2_grad_output, grad2_input - - _conv2d_gradfix_cache[key] = Conv2d - return Conv2d - -#---------------------------------------------------------------------------- diff --git a/spaces/alaa-lab/InstructCV/README.md b/spaces/alaa-lab/InstructCV/README.md deleted file mode 100644 index d42f2021ea1686fe6ec922cc402af56e6296674f..0000000000000000000000000000000000000000 --- a/spaces/alaa-lab/InstructCV/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: InstructCV -emoji: 🌖 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/install.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/install.py deleted file mode 100644 index 34e4c2f84c81731961ca6f9503bf826ed71e4cdc..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/install.py +++ /dev/null @@ -1,771 +0,0 @@ -import errno -import operator -import os -import shutil -import site -from optparse import SUPPRESS_HELP, Values -from typing import Iterable, List, Optional - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.cache import WheelCache -from pip._internal.cli import cmdoptions -from pip._internal.cli.cmdoptions import make_target_python -from pip._internal.cli.req_command import ( - RequirementCommand, - warn_if_run_as_root, - with_cleanup, -) -from pip._internal.cli.status_codes import ERROR, SUCCESS -from pip._internal.exceptions import CommandError, InstallationError -from pip._internal.locations import get_scheme -from pip._internal.metadata import get_environment -from pip._internal.models.format_control import FormatControl -from pip._internal.operations.check import ConflictDetails, check_install_conflicts -from pip._internal.req import install_given_reqs -from pip._internal.req.req_install import InstallRequirement -from pip._internal.req.req_tracker import get_requirement_tracker -from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.distutils_args import parse_distutils_args -from pip._internal.utils.filesystem import test_writable_dir -from pip._internal.utils.logging import getLogger -from pip._internal.utils.misc import ( - ensure_dir, - 
get_pip_version, - protect_pip_from_modification_on_windows, - write_output, -) -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.virtualenv import ( - running_under_virtualenv, - virtualenv_no_global, -) -from pip._internal.wheel_builder import ( - BinaryAllowedPredicate, - build, - should_build_for_install_command, -) - -logger = getLogger(__name__) - - -def get_check_binary_allowed(format_control: FormatControl) -> BinaryAllowedPredicate: - def check_binary_allowed(req: InstallRequirement) -> bool: - canonical_name = canonicalize_name(req.name or "") - allowed_formats = format_control.get_allowed_formats(canonical_name) - return "binary" in allowed_formats - - return check_binary_allowed - - -class InstallCommand(RequirementCommand): - """ - Install packages from: - - - PyPI (and other indexes) using requirement specifiers. - - VCS project urls. - - Local project directories. - - Local or remote source archives. - - pip also supports installing from "requirements files", which provide - an easy way to specify a whole environment to be installed. - """ - - usage = """ - %prog [options] [package-index-options] ... - %prog [options] -r [package-index-options] ... - %prog [options] [-e] ... - %prog [options] [-e] ... - %prog [options] ...""" - - def add_options(self) -> None: - self.cmd_opts.add_option(cmdoptions.requirements()) - self.cmd_opts.add_option(cmdoptions.constraints()) - self.cmd_opts.add_option(cmdoptions.no_deps()) - self.cmd_opts.add_option(cmdoptions.pre()) - - self.cmd_opts.add_option(cmdoptions.editable()) - self.cmd_opts.add_option( - "-t", - "--target", - dest="target_dir", - metavar="dir", - default=None, - help=( - "Install packages into . " - "By default this will not replace existing files/folders in " - ". Use --upgrade to replace existing packages in " - "with new versions." - ), - ) - cmdoptions.add_target_python_options(self.cmd_opts) - - self.cmd_opts.add_option( - "--user", - dest="use_user_site", - action="store_true", - help=( - "Install to the Python user install directory for your " - "platform. Typically ~/.local/, or %APPDATA%\\Python on " - "Windows. (See the Python documentation for site.USER_BASE " - "for full details.)" - ), - ) - self.cmd_opts.add_option( - "--no-user", - dest="use_user_site", - action="store_false", - help=SUPPRESS_HELP, - ) - self.cmd_opts.add_option( - "--root", - dest="root_path", - metavar="dir", - default=None, - help="Install everything relative to this alternate root directory.", - ) - self.cmd_opts.add_option( - "--prefix", - dest="prefix_path", - metavar="dir", - default=None, - help=( - "Installation prefix where lib, bin and other top-level " - "folders are placed" - ), - ) - - self.cmd_opts.add_option(cmdoptions.src()) - - self.cmd_opts.add_option( - "-U", - "--upgrade", - dest="upgrade", - action="store_true", - help=( - "Upgrade all specified packages to the newest available " - "version. The handling of dependencies depends on the " - "upgrade-strategy used." - ), - ) - - self.cmd_opts.add_option( - "--upgrade-strategy", - dest="upgrade_strategy", - default="only-if-needed", - choices=["only-if-needed", "eager"], - help=( - "Determines how dependency upgrading should be handled " - "[default: %default]. " - '"eager" - dependencies are upgraded regardless of ' - "whether the currently installed version satisfies the " - "requirements of the upgraded package(s). " - '"only-if-needed" - are upgraded only when they do not ' - "satisfy the requirements of the upgraded package(s)." 
- ), - ) - - self.cmd_opts.add_option( - "--force-reinstall", - dest="force_reinstall", - action="store_true", - help="Reinstall all packages even if they are already up-to-date.", - ) - - self.cmd_opts.add_option( - "-I", - "--ignore-installed", - dest="ignore_installed", - action="store_true", - help=( - "Ignore the installed packages, overwriting them. " - "This can break your system if the existing package " - "is of a different version or was installed " - "with a different package manager!" - ), - ) - - self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) - self.cmd_opts.add_option(cmdoptions.no_build_isolation()) - self.cmd_opts.add_option(cmdoptions.use_pep517()) - self.cmd_opts.add_option(cmdoptions.no_use_pep517()) - - self.cmd_opts.add_option(cmdoptions.install_options()) - self.cmd_opts.add_option(cmdoptions.global_options()) - - self.cmd_opts.add_option( - "--compile", - action="store_true", - dest="compile", - default=True, - help="Compile Python source files to bytecode", - ) - - self.cmd_opts.add_option( - "--no-compile", - action="store_false", - dest="compile", - help="Do not compile Python source files to bytecode", - ) - - self.cmd_opts.add_option( - "--no-warn-script-location", - action="store_false", - dest="warn_script_location", - default=True, - help="Do not warn when installing scripts outside PATH", - ) - self.cmd_opts.add_option( - "--no-warn-conflicts", - action="store_false", - dest="warn_about_conflicts", - default=True, - help="Do not warn about broken dependencies", - ) - - self.cmd_opts.add_option(cmdoptions.no_binary()) - self.cmd_opts.add_option(cmdoptions.only_binary()) - self.cmd_opts.add_option(cmdoptions.prefer_binary()) - self.cmd_opts.add_option(cmdoptions.require_hashes()) - self.cmd_opts.add_option(cmdoptions.progress_bar()) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, self.cmd_opts) - - @with_cleanup - def run(self, options: Values, args: List[str]) -> int: - if options.use_user_site and options.target_dir is not None: - raise CommandError("Can not combine '--user' and '--target'") - - cmdoptions.check_install_build_global(options) - upgrade_strategy = "to-satisfy-only" - if options.upgrade: - upgrade_strategy = options.upgrade_strategy - - cmdoptions.check_dist_restriction(options, check_target=True) - - install_options = options.install_options or [] - - logger.verbose("Using %s", get_pip_version()) - options.use_user_site = decide_user_install( - options.use_user_site, - prefix_path=options.prefix_path, - target_dir=options.target_dir, - root_path=options.root_path, - isolated_mode=options.isolated_mode, - ) - - target_temp_dir: Optional[TempDirectory] = None - target_temp_dir_path: Optional[str] = None - if options.target_dir: - options.ignore_installed = True - options.target_dir = os.path.abspath(options.target_dir) - if ( - # fmt: off - os.path.exists(options.target_dir) and - not os.path.isdir(options.target_dir) - # fmt: on - ): - raise CommandError( - "Target path exists but is not a directory, will not continue." 
- ) - - # Create a target directory for using with the target option - target_temp_dir = TempDirectory(kind="target") - target_temp_dir_path = target_temp_dir.path - self.enter_context(target_temp_dir) - - global_options = options.global_options or [] - - session = self.get_default_session(options) - - target_python = make_target_python(options) - finder = self._build_package_finder( - options=options, - session=session, - target_python=target_python, - ignore_requires_python=options.ignore_requires_python, - ) - wheel_cache = WheelCache(options.cache_dir, options.format_control) - - req_tracker = self.enter_context(get_requirement_tracker()) - - directory = TempDirectory( - delete=not options.no_clean, - kind="install", - globally_managed=True, - ) - - try: - reqs = self.get_requirements(args, options, finder, session) - - # Only when installing is it permitted to use PEP 660. - # In other circumstances (pip wheel, pip download) we generate - # regular (i.e. non editable) metadata and wheels. - for req in reqs: - req.permit_editable_wheels = True - - reject_location_related_install_options(reqs, options.install_options) - - preparer = self.make_requirement_preparer( - temp_build_dir=directory, - options=options, - req_tracker=req_tracker, - session=session, - finder=finder, - use_user_site=options.use_user_site, - verbosity=self.verbosity, - ) - resolver = self.make_resolver( - preparer=preparer, - finder=finder, - options=options, - wheel_cache=wheel_cache, - use_user_site=options.use_user_site, - ignore_installed=options.ignore_installed, - ignore_requires_python=options.ignore_requires_python, - force_reinstall=options.force_reinstall, - upgrade_strategy=upgrade_strategy, - use_pep517=options.use_pep517, - ) - - self.trace_basic_info(finder) - - requirement_set = resolver.resolve( - reqs, check_supported_wheels=not options.target_dir - ) - - try: - pip_req = requirement_set.get_requirement("pip") - except KeyError: - modifying_pip = False - else: - # If we're not replacing an already installed pip, - # we're not modifying it. - modifying_pip = pip_req.satisfied_by is None - protect_pip_from_modification_on_windows(modifying_pip=modifying_pip) - - check_binary_allowed = get_check_binary_allowed(finder.format_control) - - reqs_to_build = [ - r - for r in requirement_set.requirements.values() - if should_build_for_install_command(r, check_binary_allowed) - ] - - _, build_failures = build( - reqs_to_build, - wheel_cache=wheel_cache, - verify=True, - build_options=[], - global_options=[], - ) - - # If we're using PEP 517, we cannot do a legacy setup.py install - # so we fail here. - pep517_build_failure_names: List[str] = [ - r.name for r in build_failures if r.use_pep517 # type: ignore - ] - if pep517_build_failure_names: - raise InstallationError( - "Could not build wheels for {}, which is required to " - "install pyproject.toml-based projects".format( - ", ".join(pep517_build_failure_names) - ) - ) - - # For now, we just warn about failures building legacy - # requirements, as we'll fall through to a setup.py install for - # those. - for r in build_failures: - if not r.use_pep517: - r.legacy_install_reason = 8368 - - to_install = resolver.get_installation_order(requirement_set) - - # Check for conflicts in the package set we're installing. 
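(Conceptually, the conflict check below compares each installed distribution's version against the specifiers its dependents declare. A much-simplified sketch of that idea using the public `packaging` library; this is not pip's internal `check_install_conflicts`, and the example metadata is hypothetical.)

from packaging.requirements import Requirement
from packaging.version import Version

installed = {"urllib3": Version("2.1.0")}                     # hypothetical environment
declared = {"requests": [Requirement("urllib3<2,>=1.21.1")]}  # hypothetical dependent metadata

for pkg, reqs in declared.items():
    for req in reqs:
        have = installed.get(req.name)
        if have is None:
            print(f"{pkg} requires {req}, which is not installed.")
        elif not req.specifier.contains(str(have), prereleases=True):
            print(f"{pkg} requires {req}, but you have {req.name} {have} which is incompatible.")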
- conflicts: Optional[ConflictDetails] = None - should_warn_about_conflicts = ( - not options.ignore_dependencies and options.warn_about_conflicts - ) - if should_warn_about_conflicts: - conflicts = self._determine_conflicts(to_install) - - # Don't warn about script install locations if - # --target or --prefix has been specified - warn_script_location = options.warn_script_location - if options.target_dir or options.prefix_path: - warn_script_location = False - - installed = install_given_reqs( - to_install, - install_options, - global_options, - root=options.root_path, - home=target_temp_dir_path, - prefix=options.prefix_path, - warn_script_location=warn_script_location, - use_user_site=options.use_user_site, - pycompile=options.compile, - ) - - lib_locations = get_lib_location_guesses( - user=options.use_user_site, - home=target_temp_dir_path, - root=options.root_path, - prefix=options.prefix_path, - isolated=options.isolated_mode, - ) - env = get_environment(lib_locations) - - installed.sort(key=operator.attrgetter("name")) - items = [] - for result in installed: - item = result.name - try: - installed_dist = env.get_distribution(item) - if installed_dist is not None: - item = f"{item}-{installed_dist.version}" - except Exception: - pass - items.append(item) - - if conflicts is not None: - self._warn_about_conflicts( - conflicts, - resolver_variant=self.determine_resolver_variant(options), - ) - - installed_desc = " ".join(items) - if installed_desc: - write_output( - "Successfully installed %s", - installed_desc, - ) - except OSError as error: - show_traceback = self.verbosity >= 1 - - message = create_os_error_message( - error, - show_traceback, - options.use_user_site, - ) - logger.error(message, exc_info=show_traceback) # noqa - - return ERROR - - if options.target_dir: - assert target_temp_dir - self._handle_target_dir( - options.target_dir, target_temp_dir, options.upgrade - ) - - warn_if_run_as_root() - return SUCCESS - - def _handle_target_dir( - self, target_dir: str, target_temp_dir: TempDirectory, upgrade: bool - ) -> None: - ensure_dir(target_dir) - - # Checking both purelib and platlib directories for installed - # packages to be moved to target directory - lib_dir_list = [] - - # Checking both purelib and platlib directories for installed - # packages to be moved to target directory - scheme = get_scheme("", home=target_temp_dir.path) - purelib_dir = scheme.purelib - platlib_dir = scheme.platlib - data_dir = scheme.data - - if os.path.exists(purelib_dir): - lib_dir_list.append(purelib_dir) - if os.path.exists(platlib_dir) and platlib_dir != purelib_dir: - lib_dir_list.append(platlib_dir) - if os.path.exists(data_dir): - lib_dir_list.append(data_dir) - - for lib_dir in lib_dir_list: - for item in os.listdir(lib_dir): - if lib_dir == data_dir: - ddir = os.path.join(data_dir, item) - if any(s.startswith(ddir) for s in lib_dir_list[:-1]): - continue - target_item_dir = os.path.join(target_dir, item) - if os.path.exists(target_item_dir): - if not upgrade: - logger.warning( - "Target directory %s already exists. Specify " - "--upgrade to force replacement.", - target_item_dir, - ) - continue - if os.path.islink(target_item_dir): - logger.warning( - "Target directory %s already exists and is " - "a link. 
pip will not automatically replace " - "links, please remove if replacement is " - "desired.", - target_item_dir, - ) - continue - if os.path.isdir(target_item_dir): - shutil.rmtree(target_item_dir) - else: - os.remove(target_item_dir) - - shutil.move(os.path.join(lib_dir, item), target_item_dir) - - def _determine_conflicts( - self, to_install: List[InstallRequirement] - ) -> Optional[ConflictDetails]: - try: - return check_install_conflicts(to_install) - except Exception: - logger.exception( - "Error while checking for conflicts. Please file an issue on " - "pip's issue tracker: https://github.com/pypa/pip/issues/new" - ) - return None - - def _warn_about_conflicts( - self, conflict_details: ConflictDetails, resolver_variant: str - ) -> None: - package_set, (missing, conflicting) = conflict_details - if not missing and not conflicting: - return - - parts: List[str] = [] - if resolver_variant == "legacy": - parts.append( - "pip's legacy dependency resolver does not consider dependency " - "conflicts when selecting packages. This behaviour is the " - "source of the following dependency conflicts." - ) - else: - assert resolver_variant == "2020-resolver" - parts.append( - "pip's dependency resolver does not currently take into account " - "all the packages that are installed. This behaviour is the " - "source of the following dependency conflicts." - ) - - # NOTE: There is some duplication here, with commands/check.py - for project_name in missing: - version = package_set[project_name][0] - for dependency in missing[project_name]: - message = ( - "{name} {version} requires {requirement}, " - "which is not installed." - ).format( - name=project_name, - version=version, - requirement=dependency[1], - ) - parts.append(message) - - for project_name in conflicting: - version = package_set[project_name][0] - for dep_name, dep_version, req in conflicting[project_name]: - message = ( - "{name} {version} requires {requirement}, but {you} have " - "{dep_name} {dep_version} which is incompatible." - ).format( - name=project_name, - version=version, - requirement=req, - dep_name=dep_name, - dep_version=dep_version, - you=("you" if resolver_variant == "2020-resolver" else "you'll"), - ) - parts.append(message) - - logger.critical("\n".join(parts)) - - -def get_lib_location_guesses( - user: bool = False, - home: Optional[str] = None, - root: Optional[str] = None, - isolated: bool = False, - prefix: Optional[str] = None, -) -> List[str]: - scheme = get_scheme( - "", - user=user, - home=home, - root=root, - isolated=isolated, - prefix=prefix, - ) - return [scheme.purelib, scheme.platlib] - - -def site_packages_writable(root: Optional[str], isolated: bool) -> bool: - return all( - test_writable_dir(d) - for d in set(get_lib_location_guesses(root=root, isolated=isolated)) - ) - - -def decide_user_install( - use_user_site: Optional[bool], - prefix_path: Optional[str] = None, - target_dir: Optional[str] = None, - root_path: Optional[str] = None, - isolated_mode: bool = False, -) -> bool: - """Determine whether to do a user install based on the input options. - - If use_user_site is False, no additional checks are done. - If use_user_site is True, it is checked for compatibility with other - options. - If use_user_site is None, the default behaviour depends on the environment, - which is provided by the other arguments. - """ - # In some cases (config from tox), use_user_site can be set to an integer - # rather than a bool, which 'use_user_site is False' wouldn't catch. 
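(A standalone illustration of the comment above: an integer 0 coming from a tox-style config is falsy but is not the singleton `False`, so an identity check would miss it, while the truthiness test in the branch below catches it. The variable name is only for the example.)

use_user_site_example = 0                                    # hypothetical value handed in by tox
assert use_user_site_example is not False                    # an identity check would not fire
assert (use_user_site_example is not None) and (not use_user_site_example)  # this branch does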
- if (use_user_site is not None) and (not use_user_site): - logger.debug("Non-user install by explicit request") - return False - - if use_user_site: - if prefix_path: - raise CommandError( - "Can not combine '--user' and '--prefix' as they imply " - "different installation locations" - ) - if virtualenv_no_global(): - raise InstallationError( - "Can not perform a '--user' install. User site-packages " - "are not visible in this virtualenv." - ) - logger.debug("User install by explicit request") - return True - - # If we are here, user installs have not been explicitly requested/avoided - assert use_user_site is None - - # user install incompatible with --prefix/--target - if prefix_path or target_dir: - logger.debug("Non-user install due to --prefix or --target option") - return False - - # If user installs are not enabled, choose a non-user install - if not site.ENABLE_USER_SITE: - logger.debug("Non-user install because user site-packages disabled") - return False - - # If we have permission for a non-user install, do that, - # otherwise do a user install. - if site_packages_writable(root=root_path, isolated=isolated_mode): - logger.debug("Non-user install because site-packages writeable") - return False - - logger.info( - "Defaulting to user installation because normal site-packages " - "is not writeable" - ) - return True - - -def reject_location_related_install_options( - requirements: List[InstallRequirement], options: Optional[List[str]] -) -> None: - """If any location-changing --install-option arguments were passed for - requirements or on the command-line, then show a deprecation warning. - """ - - def format_options(option_names: Iterable[str]) -> List[str]: - return ["--{}".format(name.replace("_", "-")) for name in option_names] - - offenders = [] - - for requirement in requirements: - install_options = requirement.install_options - location_options = parse_distutils_args(install_options) - if location_options: - offenders.append( - "{!r} from {}".format( - format_options(location_options.keys()), requirement - ) - ) - - if options: - location_options = parse_distutils_args(options) - if location_options: - offenders.append( - "{!r} from command line".format(format_options(location_options.keys())) - ) - - if not offenders: - return - - raise CommandError( - "Location-changing options found in --install-option: {}." - " This is unsupported, use pip-level options like --user," - " --prefix, --root, and --target instead.".format("; ".join(offenders)) - ) - - -def create_os_error_message( - error: OSError, show_traceback: bool, using_user_site: bool -) -> str: - """Format an error message for an OSError - - It may occur anytime during the execution of the install command. 
- """ - parts = [] - - # Mention the error if we are not going to show a traceback - parts.append("Could not install packages due to an OSError") - if not show_traceback: - parts.append(": ") - parts.append(str(error)) - else: - parts.append(".") - - # Spilt the error indication from a helper message (if any) - parts[-1] += "\n" - - # Suggest useful actions to the user: - # (1) using user site-packages or (2) verifying the permissions - if error.errno == errno.EACCES: - user_option_part = "Consider using the `--user` option" - permissions_part = "Check the permissions" - - if not running_under_virtualenv() and not using_user_site: - parts.extend( - [ - user_option_part, - " or ", - permissions_part.lower(), - ] - ) - else: - parts.append(permissions_part) - parts.append(".\n") - - # Suggest the user to enable Long Paths if path length is - # more than 260 - if ( - WINDOWS - and error.errno == errno.ENOENT - and error.filename - and len(error.filename) > 260 - ): - parts.append( - "HINT: This error might have occurred since " - "this system does not have Windows Long Path " - "support enabled. You can find information on " - "how to enable this at " - "https://pip.pypa.io/warnings/enable-long-paths\n" - ) - - return "".join(parts).strip() + "\n" diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/certifi/core.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/certifi/core.py deleted file mode 100644 index f8d4313d34e531076e65fdb96ff4c7bb9c47de02..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/certifi/core.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -certifi.py -~~~~~~~~~~ - -This module returns the installation location of cacert.pem or its contents. -""" -import os - - -class _PipPatchedCertificate(Exception): - pass - - -DEBIAN_CA_CERTS_PATH = '/etc/ssl/certs/ca-certificates.crt' - -try: - # Return a certificate file on disk for a standalone pip zipapp running in - # an isolated build environment to use. Passing --cert to the standalone - # pip does not work since requests calls where() unconditionally on import. - _PIP_STANDALONE_CERT = os.environ.get("_PIP_STANDALONE_CERT") - if _PIP_STANDALONE_CERT: - def where(): - return _PIP_STANDALONE_CERT - raise _PipPatchedCertificate() - - from importlib.resources import path as get_path, read_text - - _CACERT_CTX = None - _CACERT_PATH = None - - def where(): - # This is slightly terrible, but we want to delay extracting the file - # in cases where we're inside of a zipimport situation until someone - # actually calls where(), but we don't want to re-extract the file - # on every call of where(), so we'll do it once then store it in a - # global variable. - global _CACERT_CTX - global _CACERT_PATH - if _CACERT_PATH is None: - # This is slightly janky, the importlib.resources API wants you to - # manage the cleanup of this file, so it doesn't actually return a - # path, it returns a context manager that will give you the path - # when you enter it and will do any cleanup when you leave it. In - # the common case of not needing a temporary file, it will just - # return the file system location and the __exit__() is a no-op. - # - # We also have to hold onto the actual context manager, because - # it will do the cleanup whenever it gets garbage collected, so - # we will also store that at the global level as well. 
- _CACERT_PATH = DEBIAN_CA_CERTS_PATH - - return _CACERT_PATH - -except _PipPatchedCertificate: - pass - -except ImportError: - # This fallback will work for Python versions prior to 3.7 that lack the - # importlib.resources module but relies on the existing `where` function - # so won't address issues with environments like PyOxidizer that don't set - # __file__ on modules. - def read_text(_module, _path, encoding="ascii"): - with open(where(), "r", encoding=encoding) as data: - return data.read() - - # If we don't have importlib.resources, then we will just do the old logic - # of assuming we're on the filesystem and munge the path directly. - def where(): - return DEBIAN_CA_CERTS_PATH - - -def contents(): - with open(where(), "r", encoding="ascii") as data: - return data.read() diff --git a/spaces/allknowingroger/Image-Models-Test168/app.py b/spaces/allknowingroger/Image-Models-Test168/app.py deleted file mode 100644 index aae239dd385c1e20a88a07f375ae1d2b449a7f7a..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test168/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "Priyanka4/pink-sunglasses-cak", - "Suchithra04/my-pet-dog-asd", - "saibharath/my-roses", - "Deepika912/mybag", - "albertengineer/lora-trained-xl-colab-dongho_1", - "dhyadav/test_db", - "Yntec/EstheticRetroAnime", - "akmalinn/surabaya_monument", - "LATHA345/by-sunrise", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = 
gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test40/README.md b/spaces/allknowingroger/Image-Models-Test40/README.md deleted file mode 100644 index ec00990abc2e873823799682aec8bf35c2af965e..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test40/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Models -emoji: 👀 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test39 ---- - - \ No newline at end of file diff --git a/spaces/alvanlii/FROMAGe/fromage/evaluate.py b/spaces/alvanlii/FROMAGe/fromage/evaluate.py deleted file mode 100644 index 9c78b5b664668799493c5f6348d1f4b9e0534914..0000000000000000000000000000000000000000 --- a/spaces/alvanlii/FROMAGe/fromage/evaluate.py +++ /dev/null @@ -1,307 +0,0 @@ -import collections -import json -import os -from PIL import Image -import numpy as np -import time -import tqdm -import torch -import torch.distributed as dist -from torch.utils.tensorboard import SummaryWriter -from torchmetrics import BLEUScore -import torchvision - -from fromage import losses as losses_utils -from fromage import utils - - -def validate(val_loader, model, tokenizer, criterion, epoch, args): - ngpus_per_node = torch.cuda.device_count() - writer = SummaryWriter(args.log_dir) - bleu_scorers = [BLEUScore(n_gram=i) for i in [1, 2, 3, 4]] - actual_step = (epoch + 1) * args.steps_per_epoch - model_modes = ['captioning', 'retrieval'] - num_words = 32 # Number of tokens to generate. - - feature_extractor = utils.get_feature_extractor_for_model(args.visual_model, image_size=args.image_size, train=False) - - def get_pixel_values_from_path(path: str): - img = Image.open(path) - img = img.resize((args.image_size, args.image_size)) - pixel_values = utils.get_pixel_values_for_model(feature_extractor, img)[None, ...] 
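        # (`[None, ...]` above inserts a leading batch axis, so the result is a
        # single-image batch, roughly (1, 3, image_size, image_size) assuming the
        # extractor returns a CHW tensor; the dtype casts and the .cuda() move
        # below then match the precision and device the model was loaded with.)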
- - if args.precision == 'fp16': - pixel_values = pixel_values.half() - elif args.precision == 'bf16': - pixel_values = pixel_values.bfloat16() - if torch.cuda.is_available(): - pixel_values = pixel_values.cuda() - return pixel_values - - def run_validate(loader, base_progress=0): - with torch.no_grad(): - end = time.time() - all_generated_captions = [] - all_gt_captions = [] - all_generated_image_paths = [] - all_image_features = [] - all_text_features = [] - - for i, (image_paths, images, caption_images, tgt_tokens, token_len) in tqdm.tqdm(enumerate(loader), position=0, total=len(loader)): - i = base_progress + i - - if torch.cuda.is_available(): - tgt_tokens = tgt_tokens.cuda(args.gpu, non_blocking=True) - token_len = token_len.cuda(args.gpu, non_blocking=True) - images = images.cuda() - - if args.precision == 'fp16': - images = images.half() - elif args.precision == 'bf16': - images = images.bfloat16() - - for model_mode in model_modes: - (model_output, full_labels, last_embedding, _, visual_embs) = model( - images, tgt_tokens, token_len, mode=model_mode, input_prefix=args.input_prompt, inference=True) # (N, T, C) - - if model_mode == 'captioning': - loss = args.cap_loss_scale * model_output.loss - elif model_mode == 'retrieval': - loss = args.ret_loss_scale * model_output.loss - else: - raise NotImplementedError - - output = model_output.logits - if model_mode == 'captioning': - acc1, acc5 = utils.accuracy(output[:, :-1, :], full_labels[:, 1:], -100, topk=(1, 5)) - top1.update(acc1[0], images.size(0)) - top5.update(acc5[0], images.size(0)) - ce_losses.update(loss.item(), images.size(0)) - - if model_mode == 'captioning': - losses.update(loss.item(), images.size(0)) - elif model_mode == 'retrieval': - if args.distributed: - original_last_embedding = torch.clone(last_embedding) - all_visual_embs = [torch.zeros_like(visual_embs) for _ in range(dist.get_world_size())] - all_last_embedding = [torch.zeros_like(last_embedding) for _ in range(dist.get_world_size())] - dist.all_gather(all_visual_embs, visual_embs) - dist.all_gather(all_last_embedding, last_embedding) - - # Overwrite with embeddings produced on this replica, which track the gradients. 
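                        # (torch.distributed.all_gather returns tensors that carry no autograd
                        # history, so substituting this replica's locally computed tensors back
                        # into their own slot is what lets gradients flow in the training version
                        # of this pattern; here run_validate executes under torch.no_grad(), so
                        # the substitution mainly keeps the eval path consistent with training.)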
- all_visual_embs[dist.get_rank()] = visual_embs - all_last_embedding[dist.get_rank()] = last_embedding - visual_embs = torch.cat(all_visual_embs) - last_embedding = torch.cat(all_last_embedding) - start_idx = args.rank * images.shape[0] - end_idx = start_idx + images.shape[0] - assert torch.all(last_embedding[start_idx:end_idx] == original_last_embedding), args.rank - - all_text_features.append(last_embedding.cpu()) - all_image_features.append(visual_embs.cpu()) - - # Run auto-regressive generation sample - if model_mode == 'captioning': - input_embs = model.module.model.get_visual_embs(images, mode='captioning') # (2, n_visual_tokens, D) - if args.input_prompt is not None: - print(f'Adding prefix "{args.input_prompt}" to captioning generate=True.') - prompt_ids = tokenizer(args.input_prompt, add_special_tokens=False, return_tensors="pt").input_ids - prompt_ids = prompt_ids.to(visual_embs.device) - prompt_embs = model.module.model.input_embeddings(prompt_ids) - prompt_embs = prompt_embs.repeat(input_embs.shape[0], 1, 1) - input_embs = torch.cat([input_embs, prompt_embs], dim=1) - - generated_ids, _, _ = model(input_embs, tgt_tokens, token_len, - generate=True, num_words=num_words, temperature=0.0, top_p=1.0, - min_word_tokens=num_words) - - if args.distributed and ngpus_per_node > 1: - all_generated_ids = [torch.zeros_like(generated_ids) for _ in range(dist.get_world_size())] - dist.all_gather(all_generated_ids, generated_ids) - all_generated_ids[dist.get_rank()] = generated_ids - generated_ids = torch.cat(all_generated_ids) - - all_tgt_tokens = [torch.zeros_like(tgt_tokens) for _ in range(dist.get_world_size())] - dist.all_gather(all_tgt_tokens, tgt_tokens) - all_tgt_tokens[dist.get_rank()] = tgt_tokens - all_tgt_tokens = torch.cat(all_tgt_tokens) - - all_image_paths = [[None for _ in image_paths] for _ in range(dist.get_world_size())] - dist.all_gather_object(all_image_paths, image_paths) - all_image_paths[dist.get_rank()] = image_paths - image_paths = [] - for p in all_image_paths: - image_paths.extend(p) - else: - all_tgt_tokens = tgt_tokens - - all_tgt_tokens[all_tgt_tokens == -100] = tokenizer.pad_token_id - generated_captions = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) - gt_captions = tokenizer.batch_decode(all_tgt_tokens, skip_special_tokens=True) - - for cap_i in range(len(generated_captions)): - image_path = image_paths[cap_i] - all_generated_image_paths.append(image_path) - stop_idx = generated_captions[cap_i].find('.') - if stop_idx > 5: - all_generated_captions.append(generated_captions[cap_i][:stop_idx]) - else: - all_generated_captions.append(generated_captions[cap_i]) - all_gt_captions.append([gt_captions[cap_i]]) - elif model_mode == 'retrieval': - if i == 0: - # Generate without image input to visualize text-generation ability. - input_ids = tgt_tokens[:, :3] # Use first 3 tokens as initial prompt for generation. 
- input_embs = model.module.model.input_embeddings(input_ids) # (N, T, D) - generated_ids, _, _ = model(input_embs, tgt_tokens, token_len, generate=True, num_words=num_words, temperature=0.0, top_p=1.0) - generated_ids = torch.cat([input_ids, generated_ids], dim=1) - generated_captions = tokenizer.batch_decode(generated_ids, skip_special_tokens=False) - gt_captions = tokenizer.batch_decode(tgt_tokens, skip_special_tokens=False) - else: - raise NotImplementedError - - if i == 0: - max_to_display = 5 - print('=' * 30) - print('Generated samples:') - for cap_i, cap in enumerate(generated_captions[:max_to_display]): - print(f'{cap_i}) {cap}') - print('=' * 30) - print('Real samples:') - for cap_i, cap in enumerate(gt_captions[:max_to_display]): - print(f'{cap_i}) {cap}') - print('=' * 30) - - # Write images and captions to Tensorboard. - if not args.distributed or (args.rank % ngpus_per_node == 0): - max_images_to_show = 16 - normalized_images = images - images.min() - normalized_images /= normalized_images.max() # (N, 3, H, W) - # Create generated caption text. - generated_cap_images = torch.stack([ - utils.create_image_of_text( - generated_captions[j].encode('ascii', 'ignore'), - width=normalized_images.shape[3], - color=(255, 255, 0)) - for j in range(normalized_images.shape[0])], axis=0) - # Append gt/generated caption images. - display_images = torch.cat([normalized_images.float().cpu(), caption_images, generated_cap_images], axis=2)[:max_images_to_show] - grid = torchvision.utils.make_grid(display_images, nrow=int(max_images_to_show ** 0.5), padding=4) - writer.add_image(f'val/images_{model_mode}', grid, actual_step) - - # measure elapsed time - batch_time.update(time.time() - end) - end = time.time() - - if i % args.print_freq == 0: - progress.display(i + 1) - - if i == args.val_steps_per_epoch - 1: - break - - # Measure captioning metrics. - path2captions = collections.defaultdict(list) - for image_path, caption in zip(all_generated_image_paths, all_gt_captions): - assert len(caption) == 1, caption - path2captions[image_path].append(caption[0].replace('[RET]', '')) - full_gt_captions = [path2captions[path] for path in all_generated_image_paths] - - print(f'Computing BLEU with {len(all_generated_captions)} generated captions:' - f'{all_generated_captions[:5]} and {len(full_gt_captions)} groundtruth captions:', - f'{full_gt_captions[:5]}.') - bleu1_score = bleu_scorers[0](all_generated_captions, full_gt_captions) - bleu1.update(bleu1_score, 1) - bleu2_score = bleu_scorers[1](all_generated_captions, full_gt_captions) - bleu2.update(bleu2_score, 1) - bleu3_score = bleu_scorers[2](all_generated_captions, full_gt_captions) - bleu3.update(bleu3_score, 2) - bleu4_score = bleu_scorers[3](all_generated_captions, full_gt_captions) - bleu4.update(bleu4_score, 3) - - # Measure retrieval metrics over the entire validation set. 
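A note on the BLEU calls just above: torchmetrics' `BLEUScore` scores a sequence of predicted strings against a sequence of reference *lists*, which is why `full_gt_captions` regroups the ground-truth captions by image path before scoring. A tiny self-contained example of that input format (toy strings only):

from torchmetrics import BLEUScore

preds = ['a cat sits on the mat']
refs = [['a cat sits on the mat', 'there is a cat on the mat']]  # one reference list per prediction
print(float(BLEUScore(n_gram=2)(preds, refs)))  # expected to be 1.0 for an exact match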
- all_image_features = torch.cat(all_image_features, axis=0) # (coco_val_len, 2048) - all_text_features = torch.cat(all_text_features, axis=0) # (coco_val_len, 2048) - - print(f"Computing similarity between {all_image_features.shape} and {all_text_features.shape}.") - logits_per_image = all_image_features @ all_text_features.t() - logits_per_text = logits_per_image.t() - all_image_acc1, all_image_acc5 = losses_utils.contrastive_acc(logits_per_image, topk=(1, 5)) - all_caption_acc1, all_caption_acc5 = losses_utils.contrastive_acc(logits_per_text, topk=(1, 5)) - image_loss = losses_utils.contrastive_loss(logits_per_image) - caption_loss = losses_utils.contrastive_loss(logits_per_text) - - loss = args.ret_loss_scale * (image_loss + caption_loss) / 2.0 - losses.update(loss.item(), logits_per_image.size(0)) - top1_caption.update(all_caption_acc1.item(), logits_per_image.size(0)) - top5_caption.update(all_caption_acc5.item(), logits_per_image.size(0)) - top1_image.update(all_image_acc1.item(), logits_per_image.size(0)) - top5_image.update(all_image_acc5.item(), logits_per_image.size(0)) - - - batch_time = utils.AverageMeter('Time', ':6.3f', utils.Summary.AVERAGE) - losses = utils.AverageMeter('Loss', ':.4e', utils.Summary.AVERAGE) - ce_losses = utils.AverageMeter('CeLoss', ':.4e', utils.Summary.AVERAGE) - top1 = utils.AverageMeter('Acc@1', ':6.2f', utils.Summary.AVERAGE) - top5 = utils.AverageMeter('Acc@5', ':6.2f', utils.Summary.AVERAGE) - bleu1 = utils.AverageMeter('BLEU@1', ':6.2f', utils.Summary.AVERAGE) - bleu2 = utils.AverageMeter('BLEU@2', ':6.2f', utils.Summary.AVERAGE) - bleu3 = utils.AverageMeter('BLEU@3', ':6.2f', utils.Summary.AVERAGE) - bleu4 = utils.AverageMeter('BLEU@4', ':6.2f', utils.Summary.AVERAGE) - top1_caption = utils.AverageMeter('CaptionAcc@1', ':6.2f', utils.Summary.AVERAGE) - top5_caption = utils.AverageMeter('CaptionAcc@5', ':6.2f', utils.Summary.AVERAGE) - top1_image = utils.AverageMeter('ImageAcc@1', ':6.2f', utils.Summary.AVERAGE) - top5_image = utils.AverageMeter('ImageAcc@5', ':6.2f', utils.Summary.AVERAGE) - - progress = utils.ProgressMeter( - len(val_loader) + (args.distributed and (len(val_loader.sampler) * args.world_size < len(val_loader.dataset))), - [batch_time, losses, top1, top5, bleu4], - prefix='Test: ') - - # switch to evaluate mode - model.eval() - - run_validate(val_loader) - if args.distributed: - batch_time.all_reduce() - losses.all_reduce() - bleu1.all_reduce() - bleu2.all_reduce() - bleu3.all_reduce() - bleu4.all_reduce() - top1.all_reduce() - top5.all_reduce() - top1_caption.all_reduce() - top5_caption.all_reduce() - top1_image.all_reduce() - top5_image.all_reduce() - - if args.distributed and (len(val_loader.sampler) * args.world_size < len(val_loader.dataset)): - aux_val_dataset = Subset(val_loader.dataset, - range(len(val_loader.sampler) * args.world_size, len(val_loader.dataset))) - aux_val_loader = torch.utils.data.DataLoader( - aux_val_dataset, batch_size=(args.val_batch_size or args.batch_size), shuffle=False, - num_workers=args.workers, pin_memory=True, collate_fn=data.collate_fn) - run_validate(aux_val_loader, len(val_loader)) - - progress.display_summary() - - writer.add_scalar('val/total_secs_per_batch', batch_time.avg, actual_step) - writer.add_scalar('val/seq_top1_acc', top1.avg, actual_step) - writer.add_scalar('val/seq_top5_acc', top5.avg, actual_step) - writer.add_scalar('val/ce_loss', losses.avg, actual_step) - writer.add_scalar('val/bleu1', bleu1.avg, actual_step) - writer.add_scalar('val/bleu2', bleu2.avg, actual_step) - 
writer.add_scalar('val/bleu3', bleu3.avg, actual_step) - writer.add_scalar('val/bleu4', bleu4.avg, actual_step) - writer.add_scalar('val/contrastive_loss', losses.avg, actual_step) - writer.add_scalar('val/t2i_top1_acc', top1_caption.avg, actual_step) - writer.add_scalar('val/t2i_top5_acc', top5_caption.avg, actual_step) - writer.add_scalar('val/i2t_top1_acc', top1_image.avg, actual_step) - writer.add_scalar('val/i2t_top5_acc', top5_image.avg, actual_step) - writer.add_scalar('val/top1_acc', (top1_caption.avg + top1_image.avg) / 2.0, actual_step) - writer.add_scalar('val/top5_acc', (top5_caption.avg + top5_image.avg) / 2.0, actual_step) - - writer.close() - - # Use top1 accuracy as the metric for keeping the best checkpoint. - return top1_caption.avg diff --git a/spaces/amankishore/sjc/sd1/ldm/modules/diffusionmodules/model.py b/spaces/amankishore/sjc/sd1/ldm/modules/diffusionmodules/model.py deleted file mode 100644 index 2fa615e4e76ee0df6ad2e0341cb486f93ea1b36d..0000000000000000000000000000000000000000 --- a/spaces/amankishore/sjc/sd1/ldm/modules/diffusionmodules/model.py +++ /dev/null @@ -1,843 +0,0 @@ -# pytorch_diffusion + derived encoder decoder -import math -import torch -import torch.nn as nn -import numpy as np -from einops import rearrange - -from ldm.util import instantiate_from_config -from ldm.modules.attention import LinearAttention -import torch.nn.functional as F - -def get_timestep_embedding(timesteps, embedding_dim): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: - From Fairseq. - Build sinusoidal embeddings. - This matches the implementation in tensor2tensor, but differs slightly - from the description in Section 3.5 of "Attention Is All You Need". - """ - assert len(timesteps.shape) == 1 - - half_dim = embedding_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) - emb = emb.to(device=timesteps.device) - emb = timesteps.float()[:, None] * emb[None, :] - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) - if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0,1,0,0)) - return emb - - -def nonlinearity(x): - # swish - return x*torch.sigmoid(x) - - -def Normalize(in_channels, num_groups=32): - return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) - - -class Upsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") - if self.with_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=0) - - def forward(self, x): - if self.with_conv: - pad = (0,1,0,1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - else: - x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) - return x - - -class ResnetBlock(nn.Module): - def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, - dropout, temb_channels=512): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels 
if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - - self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, - out_channels) - self.norm2 = Normalize(out_channels) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d(out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - else: - self.nin_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x, temb): - h = x - h = self.norm1(h) - h = nonlinearity(h) - h = self.conv1(h) - - if temb is not None: - h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] - - h = self.norm2(h) - h = nonlinearity(h) - h = self.dropout(h) - h = self.conv2(h) - - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) - - return x+h - - -class LinAttnBlock(LinearAttention): - """to match AttnBlock usage""" - def __init__(self, in_channels): - super().__init__(dim=in_channels, heads=1, dim_head=in_channels) - - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = q.reshape(b,c,h*w) - q = q.permute(0,2,1) # b,hw,c - k = k.reshape(b,c,h*w) # b,c,hw - w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b,c,h*w) - w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) - h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - h_ = h_.reshape(b,c,h,w) - - h_ = self.proj_out(h_) - - return x+h_ - - -def make_attn(in_channels, attn_type="vanilla"): - assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' - print(f"making attention of type '{attn_type}' with {in_channels} in_channels") - if attn_type == "vanilla": - return AttnBlock(in_channels) - elif attn_type == "none": - return nn.Identity(in_channels) - else: - return LinAttnBlock(in_channels) - - -class Model(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = self.ch*4 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - self.use_timestep = 
use_timestep - if self.use_timestep: - # timestep embedding - self.temb = nn.Module() - self.temb.dense = nn.ModuleList([ - torch.nn.Linear(self.ch, - self.temb_ch), - torch.nn.Linear(self.temb_ch, - self.temb_ch), - ]) - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - skip_in = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - if i_block == self.num_res_blocks: - skip_in = ch*in_ch_mult[i_level] - block.append(ResnetBlock(in_channels=block_in+skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x, t=None, context=None): - #assert x.shape[2] == x.shape[3] == self.resolution - if context is not None: - # assume aligned context, cat along channel axis - x = torch.cat((x, context), dim=1) - if self.use_timestep: - # timestep embedding - assert t is not None - temb = get_timestep_embedding(t, self.ch) - temb = self.temb.dense[0](temb) - temb = nonlinearity(temb) - temb = self.temb.dense[1](temb) - else: - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb) - if 
len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - def get_last_layer(self): - return self.conv_out.weight - - -class Encoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", - **ignore_kwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.in_ch_mult = in_ch_mult - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - 2*z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - # timestep embedding - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class Decoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, - attn_type="vanilla", **ignorekwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.give_pre_end = give_pre_end - self.tanh_out = tanh_out - - # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = 
(1,)+tuple(ch_mult) - block_in = ch*ch_mult[self.num_resolutions-1] - curr_res = resolution // 2**(self.num_resolutions-1) - self.z_shape = (1,z_channels,curr_res,curr_res) - print("Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) - - # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=3, - stride=1, - padding=1) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, z): - #assert z.shape[1:] == self.z_shape[1:] - self.last_z_shape = z.shape - - # timestep embedding - temb = None - - # z to block_in - h = self.conv_in(z) - - # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - if self.give_pre_end: - return h - - - h_fake = self.norm_out(h).type(torch.float16) - #h_fake = F.group_norm(h.float(),32,eps=1e-1).detach() - h = (h - h.mean([2,3],keepdims=True)) / h.std([2,3],keepdims=True) - #std_val = h_fake.std([2,3],keepdims=True) - #h = h*(0.5+h_fake.std([2,3],keepdims=True).type(torch.float16))+h_fake.mean([2,3],keepdims=True).type(torch.float16) - #h = h + h_fake.mean([2,3],keepdims=True).type(torch.float16) - h = h + (h_fake-h).detach() - h = nonlinearity(h) - h = self.conv_out(h) - if self.tanh_out: - h = torch.tanh(h) - - return h - - -class SimpleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, *args, **kwargs): - super().__init__() - self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), - ResnetBlock(in_channels=in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=2 * in_channels, - out_channels=4 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=4 * in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - nn.Conv2d(2*in_channels, in_channels, 1), - Upsample(in_channels, with_conv=True)]) - # end - self.norm_out = Normalize(in_channels) - self.conv_out = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - for i, layer in 
enumerate(self.model): - if i in [1,2,3]: - x = layer(x, None) - else: - x = layer(x) - - h = self.norm_out(x) - h = nonlinearity(h) - x = self.conv_out(h) - return x - - -class UpsampleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, - ch_mult=(2,2), dropout=0.0): - super().__init__() - # upsampling - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - block_in = in_channels - curr_res = resolution // 2 ** (self.num_resolutions - 1) - self.res_blocks = nn.ModuleList() - self.upsample_blocks = nn.ModuleList() - for i_level in range(self.num_resolutions): - res_block = [] - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - res_block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - self.res_blocks.append(nn.ModuleList(res_block)) - if i_level != self.num_resolutions - 1: - self.upsample_blocks.append(Upsample(block_in, True)) - curr_res = curr_res * 2 - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - # upsampling - h = x - for k, i_level in enumerate(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): - h = self.res_blocks[i_level][i_block](h, None) - if i_level != self.num_resolutions - 1: - h = self.upsample_blocks[k](h) - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class LatentRescaler(nn.Module): - def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): - super().__init__() - # residual block, interpolate, residual block - self.factor = factor - self.conv_in = nn.Conv2d(in_channels, - mid_channels, - kernel_size=3, - stride=1, - padding=1) - self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) - self.attn = AttnBlock(mid_channels) - self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) - - self.conv_out = nn.Conv2d(mid_channels, - out_channels, - kernel_size=1, - ) - - def forward(self, x): - x = self.conv_in(x) - for block in self.res_block1: - x = block(x, None) - x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) - x = self.attn(x) - for block in self.res_block2: - x = block(x, None) - x = self.conv_out(x) - return x - - -class MergedRescaleEncoder(nn.Module): - def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, - ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): - super().__init__() - intermediate_chn = ch * ch_mult[-1] - self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, - z_channels=intermediate_chn, double_z=False, resolution=resolution, - attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, - out_ch=None) - self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, - mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) - - def forward(self, x): - x = self.encoder(x) - x = self.rescaler(x) - return x - - -class MergedRescaleDecoder(nn.Module): - def 
__init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), - dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): - super().__init__() - tmp_chn = z_channels*ch_mult[-1] - self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, - resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, - ch_mult=ch_mult, resolution=resolution, ch=ch) - self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, - out_channels=tmp_chn, depth=rescale_module_depth) - - def forward(self, x): - x = self.rescaler(x) - x = self.decoder(x) - return x - - -class Upsampler(nn.Module): - def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): - super().__init__() - assert out_size >= in_size - num_blocks = int(np.log2(out_size//in_size))+1 - factor_up = 1.+ (out_size % in_size) - print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") - self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, - out_channels=in_channels) - self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, - attn_resolutions=[], in_channels=None, ch=in_channels, - ch_mult=[ch_mult for _ in range(num_blocks)]) - - def forward(self, x): - x = self.rescaler(x) - x = self.decoder(x) - return x - - -class Resize(nn.Module): - def __init__(self, in_channels=None, learned=False, mode="bilinear"): - super().__init__() - self.with_conv = learned - self.mode = mode - if self.with_conv: - print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") - raise NotImplementedError() - assert in_channels is not None - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=4, - stride=2, - padding=1) - - def forward(self, x, scale_factor=1.0): - if scale_factor==1.0: - return x - else: - x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor) - return x - -class FirstStagePostProcessor(nn.Module): - - def __init__(self, ch_mult:list, in_channels, - pretrained_model:nn.Module=None, - reshape=False, - n_channels=None, - dropout=0., - pretrained_config=None): - super().__init__() - if pretrained_config is None: - assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' - self.pretrained_model = pretrained_model - else: - assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' - self.instantiate_pretrained(pretrained_config) - - self.do_reshape = reshape - - if n_channels is None: - n_channels = self.pretrained_model.encoder.ch - - self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) - self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, - stride=1,padding=1) - - blocks = [] - downs = [] - ch_in = n_channels - for m in ch_mult: - blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) - ch_in = m * n_channels - downs.append(Downsample(ch_in, with_conv=False)) - - self.model = nn.ModuleList(blocks) - self.downsampler = nn.ModuleList(downs) - - - def instantiate_pretrained(self, config): - model = instantiate_from_config(config) - self.pretrained_model = model.eval() - # 
self.pretrained_model.train = False - for param in self.pretrained_model.parameters(): - param.requires_grad = False - - - @torch.no_grad() - def encode_with_pretrained(self,x): - c = self.pretrained_model.encode(x) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - return c - - def forward(self,x): - z_fs = self.encode_with_pretrained(x) - z = self.proj_norm(z_fs) - z = self.proj(z) - z = nonlinearity(z) - - for submodel, downmodel in zip(self.model,self.downsampler): - z = submodel(z,temb=None) - z = downmodel(z) - - if self.do_reshape: - z = rearrange(z,'b c h w -> b (h w) c') - return z - diff --git a/spaces/apsys/HSSR/model.py b/spaces/apsys/HSSR/model.py deleted file mode 100644 index 86c46fec4f8f9b8f290eb7646cd6092c2e4518d4..0000000000000000000000000000000000000000 --- a/spaces/apsys/HSSR/model.py +++ /dev/null @@ -1,923 +0,0 @@ -# ----------------------------------------------------------------------------------- -# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257 -# Originally Written by Ze Liu, Modified by Jingyun Liang. -# ----------------------------------------------------------------------------------- - -import math -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - - -from torch.nn.parameter import Parameter - -class ChannelAttention(nn.Module): - """Channel attention used in RCAN. - Args: - num_feat (int): Channel number of intermediate features. - squeeze_factor (int): Channel squeeze factor. Default: 16. - """ - - def __init__(self, num_feat, squeeze_factor=16): - super(ChannelAttention, self).__init__() - self.attention = nn.Sequential( - nn.AdaptiveAvgPool2d(1), - nn.Conv2d(num_feat, num_feat // squeeze_factor, 1, padding=0), - nn.ReLU(inplace=True), - nn.Conv2d(num_feat // squeeze_factor, num_feat, 1, padding=0), - nn.Sigmoid()) - - def forward(self, x): - y = self.attention(x) - return x * y - - -class CAB(nn.Module): - - def __init__(self, num_feat, compress_ratio=3, squeeze_factor=16): - super(CAB, self).__init__() - - self.cab = nn.Sequential( - nn.Conv2d(num_feat, num_feat // compress_ratio, 3, 1, 1), - nn.GELU(), - nn.Conv2d(num_feat // compress_ratio, num_feat, 3, 1, 1), - ChannelAttention(num_feat, squeeze_factor) - ) - - def forward(self, x): - return self.cab(x) - - - - -class MLP(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width 
of image - - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - r""" Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. Default: 0.0 - """ - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """ - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = 
self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - def extra_repr(self) -> str: - return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' - - def flops(self, N): - # calculate flops for 1 window with token length of N - flops = 0 - # qkv = self.qkv(x) - flops += N * self.dim * 3 * self.dim - # attn = (q @ k.transpose(-2, -1)) - flops += self.num_heads * N * (self.dim // self.num_heads) * N - # x = (attn @ v) - flops += self.num_heads * N * N * (self.dim // self.num_heads) - # x = self.proj(x) - flops += N * self.dim * self.dim - return flops - - -class SwinTransformerBlock(nn.Module): - r""" Swin Transformer Block. - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resulotion. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - if min(self.input_resolution) <= self.window_size: - # if window size is larger than input resolution, we don't partition windows - self.shift_size = 0 - self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - - self.c_attn = CAB(dim) - - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - if self.shift_size > 0: - attn_mask = self.calculate_mask(self.input_resolution) - else: - attn_mask = None - - self.register_buffer("attn_mask", attn_mask) - - def calculate_mask(self, x_size): - # calculate attention mask for SW-MSA - H, W = x_size - img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - return attn_mask - - def camask(self,input_size, num_channels, prob=0.5): - mask = torch.ones((*input_size,num_channels)) - mask = torch.bernoulli(prob * mask) - # print(mask.shape) - return mask - - def forward(self, x, x_size): - H, W = x_size - B, L, C = x.shape - # assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - cshortcut = x - - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size - if self.input_resolution == x_size: - attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C - else: - attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device)) - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - x = x.view(B, H * W, C) - # print("Shortcut size:", cshortcut.shape) - if self.input_resolution == x_size: - cx = self.c_attn(cshortcut.permute(0, 3, 1, 2)).permute(0, 2, 3, 1).contiguous().view(B,H*W,C) - else: - cx = self.c_attn(cshortcut.permute(0, 3, 1, 2)).permute(0, 2, 3, 1).contiguous().view(B,H*W,C) - - # FFN - x = shortcut + self.drop_path(x) + cx - x = x + self.drop_path(self.norm2(self.mlp(x))) - - return x - - def extra_repr(self) -> str: - return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ - f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" - - def flops(self): - flops = 0 - H, W = self.input_resolution - # norm1 - flops += self.dim * H * W - # W-MSA/SW-MSA - nW = H * W / self.window_size / self.window_size - flops += nW * self.attn.flops(self.window_size * self.window_size) - # mlp - flops += 2 * H * W * 
self.dim * self.dim * self.mlp_ratio - # norm2 - flops += self.dim * H * W - return flops - - -class PatchMerging(nn.Module): - r""" Patch Merging Layer. - - Args: - input_resolution (tuple[int]): Resolution of input feature. - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.input_resolution = input_resolution - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x): - """ - x: B, H*W, C - """ - H, W = self.input_resolution - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." - - x = x.view(B, H, W, C) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - def extra_repr(self) -> str: - return f"input_resolution={self.input_resolution}, dim={self.dim}" - - def flops(self): - H, W = self.input_resolution - flops = H * W * self.dim - flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim - return flops - - -class BasicLayer(nn.Module): - """ A basic Swin Transformer layer for one stage. - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - depth (int): Number of blocks. - num_heads (int): Number of attention heads. - window_size (int): Local window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
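        Example (editor-added sketch, not part of the original file; argument values are
        hypothetical and assume torch and the classes defined in this module are importable):
            >>> layer = BasicLayer(dim=96, input_resolution=(56, 56), depth=2,
            ...                    num_heads=3, window_size=7)
            >>> x = torch.randn(1, 56 * 56, 96)   # tokens in (B, H*W, C) layout
            >>> y = layer(x, (56, 56))            # same (B, H*W, C) shape when downsample is None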
- """ - - def __init__(self, dim, input_resolution, depth, num_heads, window_size, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): - - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList([ - SwinTransformerBlock(dim=dim, input_resolution=input_resolution, - num_heads=num_heads, window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop, attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer) - for i in range(depth)]) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, x_size): - for blk in self.blocks: - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, x_size) - else: - x = blk(x, x_size) - if self.downsample is not None: - x = self.downsample(x) - return x - - def extra_repr(self) -> str: - return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" - - def flops(self): - flops = 0 - for blk in self.blocks: - flops += blk.flops() - if self.downsample is not None: - flops += self.downsample.flops() - return flops - - -class RSTB(nn.Module): - """Residual Swin Transformer Block (RSTB). - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - depth (int): Number of blocks. - num_heads (int): Number of attention heads. - window_size (int): Local window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - img_size: Input image size. - patch_size: Patch size. - resi_connection: The convolutional block before residual connection. 
- """ - - def __init__(self, dim, input_resolution, depth, num_heads, window_size, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False, - img_size=224, patch_size=4, resi_connection='1conv'): - super(RSTB, self).__init__() - - self.dim = dim - self.input_resolution = input_resolution - - self.residual_group = BasicLayer(dim=dim, - input_resolution=input_resolution, - depth=depth, - num_heads=num_heads, - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop, attn_drop=attn_drop, - drop_path=drop_path, - norm_layer=norm_layer, - downsample=downsample, - use_checkpoint=use_checkpoint) - - if resi_connection == '1conv': - self.conv = nn.Conv2d(dim, dim, 3, 1, 1) - elif resi_connection == '3conv': - # to save parameters and memory - self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), - nn.LeakyReLU(negative_slope=0.2, inplace=True), - nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), - nn.LeakyReLU(negative_slope=0.2, inplace=True), - nn.Conv2d(dim // 4, dim, 3, 1, 1)) - - self.patch_embed = PatchEmbed( - img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, - norm_layer=None) - - self.patch_unembed = PatchUnEmbed( - img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, - norm_layer=None) - - def forward(self, x, x_size): - return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x - - def flops(self): - flops = 0 - flops += self.residual_group.flops() - H, W = self.input_resolution - flops += H * W * self.dim * self.dim * 9 - flops += self.patch_embed.flops() - flops += self.patch_unembed.flops() - - return flops - - -class PatchEmbed(nn.Module): - r""" Image to Patch Embedding - - Args: - img_size (int): Image size. Default: 224. - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] - self.img_size = img_size - self.patch_size = patch_size - self.patches_resolution = patches_resolution - self.num_patches = patches_resolution[0] * patches_resolution[1] - - self.in_chans = in_chans - self.embed_dim = embed_dim - - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - x = x.flatten(2).transpose(1, 2) # B Ph*Pw C - if self.norm is not None: - x = self.norm(x) - return x - - def flops(self): - flops = 0 - H, W = self.img_size - if self.norm is not None: - flops += H * W * self.embed_dim - return flops - - -class PatchUnEmbed(nn.Module): - r""" Image to Patch Unembedding - - Args: - img_size (int): Image size. Default: 224. - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. 
Default: None - """ - - def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] - self.img_size = img_size - self.patch_size = patch_size - self.patches_resolution = patches_resolution - self.num_patches = patches_resolution[0] * patches_resolution[1] - - self.in_chans = in_chans - self.embed_dim = embed_dim - - def forward(self, x, x_size): - B, HW, C = x.shape - x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C - return x - - def flops(self): - flops = 0 - return flops - - -class Upsample(nn.Sequential): - """Upsample module. - - Args: - scale (int): Scale factor. Supported scales: 2^n and 3. - num_feat (int): Channel number of intermediate features. - """ - - def __init__(self, scale, num_feat): - m = [] - if (scale & (scale - 1)) == 0: # scale = 2^n - for _ in range(int(math.log(scale, 2))): - m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(2)) - elif scale == 3: - m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(3)) - else: - raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') - super(Upsample, self).__init__(*m) - - -class UpsampleOneStep(nn.Sequential): - """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle) - Used in lightweight SR to save parameters. - - Args: - scale (int): Scale factor. Supported scales: 2^n and 3. - num_feat (int): Channel number of intermediate features. - - """ - - def __init__(self, scale, num_feat, num_out_ch, input_resolution=None): - self.num_feat = num_feat - self.input_resolution = input_resolution - m = [] - m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1)) - m.append(nn.PixelShuffle(scale)) - super(UpsampleOneStep, self).__init__(*m) - - def flops(self): - H, W = self.input_resolution - flops = H * W * self.num_feat * 3 * 9 - return flops - - -class SwinIR(nn.Module): - r""" SwinIR - A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer. - - Args: - img_size (int | tuple(int)): Input image size. Default 64 - patch_size (int | tuple(int)): Patch size. Default: 1 - in_chans (int): Number of input image channels. Default: 3 - embed_dim (int): Patch embedding dimension. Default: 96 - depths (tuple(int)): Depth of each Swin Transformer layer. - num_heads (tuple(int)): Number of attention heads in different layers. - window_size (int): Window size. Default: 7 - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None - drop_rate (float): Dropout rate. Default: 0 - attn_drop_rate (float): Attention dropout rate. Default: 0 - drop_path_rate (float): Stochastic depth rate. Default: 0.1 - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False - patch_norm (bool): If True, add normalization after patch embedding. Default: True - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False - upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction - img_range: Image range. 1. 
or 255. - upsampler: The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None - resi_connection: The convolutional block before residual connection. '1conv'/'3conv' - """ - - def __init__(self, img_size=64, patch_size=1, in_chans=3, - embed_dim=64, depths=[4, 4, 4, 4], num_heads=[4, 4, 4, 4], - window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, - norm_layer=nn.LayerNorm, ape=False, patch_norm=True, - use_checkpoint=False, upscale=4, img_range=1., upsampler='', resi_connection='1conv', - **kwargs): - super(SwinIR, self).__init__() - num_in_ch = in_chans - num_out_ch = in_chans - num_feat = 64 - self.img_range = img_range - if in_chans == 3: - rgb_mean = (0.4488, 0.4371, 0.4040) - self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1) - else: - self.mean = torch.zeros(1, 1, 1, 1) - self.upscale = upscale - self.upsampler = upsampler - self.window_size = window_size - - ##################################################################################################### - ################################### 1, shallow feature extraction ################################### - self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1) - - ##################################################################################################### - ################################### 2, deep feature extraction ###################################### - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.num_features = embed_dim - self.mlp_ratio = mlp_ratio - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None) - num_patches = self.patch_embed.num_patches - patches_resolution = self.patch_embed.patches_resolution - self.patches_resolution = patches_resolution - - # merge non-overlapping patches into image - self.patch_unembed = PatchUnEmbed( - img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None) - - # absolute position embedding - if self.ape: - self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) - trunc_normal_(self.absolute_pos_embed, std=.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule - - # build Residual Swin Transformer blocks (RSTB) - self.layers = nn.ModuleList() - for i_layer in range(self.num_layers): - layer = RSTB(dim=embed_dim, - input_resolution=(patches_resolution[0], - patches_resolution[1]), - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results - norm_layer=norm_layer, - downsample=None, - use_checkpoint=use_checkpoint, - img_size=img_size, - patch_size=patch_size, - resi_connection=resi_connection - - ) - self.layers.append(layer) - self.norm = norm_layer(self.num_features) - - # build the last conv layer in deep feature extraction - if resi_connection == '1conv': - self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1) - elif resi_connection == '3conv': - # to save parameters and memory - 
self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1), - nn.LeakyReLU(negative_slope=0.2, inplace=True), - nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0), - nn.LeakyReLU(negative_slope=0.2, inplace=True), - nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1)) - - ##################################################################################################### - ################################ 3, high quality image reconstruction ################################ - if self.upsampler == 'pixelshuffle': - # for classical SR - self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), - nn.LeakyReLU(inplace=True)) - self.upsample = Upsample(upscale, num_feat) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - elif self.upsampler == 'pixelshuffledirect': - # for lightweight SR (to save parameters) - self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch, - (patches_resolution[0], patches_resolution[1])) - elif self.upsampler == 'nearest+conv': - # for real-world SR (less artifacts) - self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), - nn.LeakyReLU(inplace=True)) - self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - if self.upscale == 4: - self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - else: - # for image denoising and JPEG compression artifact reduction - self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1) - - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'absolute_pos_embed'} - - @torch.jit.ignore - def no_weight_decay_keywords(self): - return {'relative_position_bias_table'} - - def check_image_size(self, x): - _, _, h, w = x.size() - mod_pad_h = (self.window_size - h % self.window_size) % self.window_size - mod_pad_w = (self.window_size - w % self.window_size) % self.window_size - x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect') - return x - - def forward_features(self, x): - x_size = (x.shape[2], x.shape[3]) - x = self.patch_embed(x) - if self.ape: - x = x + self.absolute_pos_embed - x = self.pos_drop(x) - - for layer in self.layers: - x = layer(x, x_size) - - x = self.norm(x) # B L C - x = self.patch_unembed(x, x_size) - - return x - - def forward(self, x): - H, W = x.shape[2:] - x = self.check_image_size(x) - - self.mean = self.mean.type_as(x) - x = (x - self.mean) * self.img_range - - if self.upsampler == 'pixelshuffle': - # for classical SR - x = self.conv_first(x) - x = self.conv_after_body(self.forward_features(x)) + x - x = self.conv_before_upsample(x) - x = self.conv_last(self.upsample(x)) - elif self.upsampler == 'pixelshuffledirect': - # for lightweight SR - x = self.conv_first(x) - x = self.conv_after_body(self.forward_features(x)) + x - x = self.upsample(x) - elif self.upsampler == 'nearest+conv': - # for real-world SR - x = self.conv_first(x) - x = self.conv_after_body(self.forward_features(x)) + x - x = self.conv_before_upsample(x) - x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) - if 
self.upscale == 4: - x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) - x = self.conv_last(self.lrelu(self.conv_hr(x))) - else: - # for image denoising and JPEG compression artifact reduction - x_first = self.conv_first(x) - res = self.conv_after_body(self.forward_features(x_first)) + x_first - x = x + self.conv_last(res) - - x = x / self.img_range + self.mean - - return x[:, :, :H*self.upscale, :W*self.upscale] - - def flops(self): - flops = 0 - H, W = self.patches_resolution - flops += H * W * 3 * self.embed_dim * 9 - flops += self.patch_embed.flops() - for i, layer in enumerate(self.layers): - flops += layer.flops() - flops += H * W * 3 * self.embed_dim * self.embed_dim - flops += self.upsample.flops() - return flops - - -if __name__ == '__main__': - upscale = 4 - window_size = 8 - height = (1024 // upscale // window_size + 1) * window_size - width = (720 // upscale // window_size + 1) * window_size - model = SwinIR(upscale=4, img_size=(height, width), - window_size=window_size, img_range=1., depths=[3, 3, 3, 3], - embed_dim=60, num_heads=[3, 3, 3, 3], mlp_ratio=1.5, upsampler='pixelshuffledirect') - print(model) - print(height, width, model.flops() / 1e9) - x = torch.randn((1, 3, height, width)) - x = model(x) - print(x.shape) diff --git a/spaces/arundevops47/chatbot-with-langchain-and-pinecone/README.md b/spaces/arundevops47/chatbot-with-langchain-and-pinecone/README.md deleted file mode 100644 index bc6d0a4241bc04d3146860133eb03ca597dd7665..0000000000000000000000000000000000000000 --- a/spaces/arundevops47/chatbot-with-langchain-and-pinecone/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Chatbot With Langchain And Pinecone -emoji: 🐢 -colorFrom: green -colorTo: red -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v4/schema/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v4/schema/__init__.py deleted file mode 100644 index 618a50350bc74acb21a60987a78f62535c70b8b2..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v4/schema/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# flake8: noqa -from .core import * -from .channels import * -SCHEMA_VERSION = 'v4.17.0' -SCHEMA_URL = 'https://vega.github.io/schema/vega-lite/v4.17.0.json' diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/error/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/error/__init__.py deleted file mode 100644 index 216c000dc5ffc8e53cc9c596e420c1e67604d1aa..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/error/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'ericvergnaud' diff --git a/spaces/asas-ai/Arabic-LLM-Leaderboard/README.md b/spaces/asas-ai/Arabic-LLM-Leaderboard/README.md deleted file mode 100644 index 1b8a3abdb8a0aeb7005130a72e2dd3b34a7b800f..0000000000000000000000000000000000000000 --- a/spaces/asas-ai/Arabic-LLM-Leaderboard/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Arabic LLM Leaderboard -emoji: 🏆 -colorFrom: gray -colorTo: indigo -sdk: streamlit -sdk_version: 1.24.0 -app_file: app.py -pinned: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline 
at end of file diff --git a/spaces/avatar2k/02-H5-AR-VR-IOT/style.css b/spaces/avatar2k/02-H5-AR-VR-IOT/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/avatar2k/02-H5-AR-VR-IOT/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/awacke1/ChatGPT-QA-Translation-Summary-14/htmlTemplates.py b/spaces/awacke1/ChatGPT-QA-Translation-Summary-14/htmlTemplates.py deleted file mode 100644 index 036bb02bbc7a0bc4ae4614dc5bf528403ddbedd0..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ChatGPT-QA-Translation-Summary-14/htmlTemplates.py +++ /dev/null @@ -1,44 +0,0 @@ -css = ''' -"].join(""),l.id=h,(m?l:n).innerHTML+=f,n.appendChild(l),m||(n.style.background="",n.style.overflow="hidden",k=g.style.overflow,g.style.overflow="hidden",g.appendChild(n)),i=c(l,a),m?l.parentNode.removeChild(l):(n.parentNode.removeChild(n),g.style.overflow=k),!!i},z=function(b){var c=a.matchMedia||a.msMatchMedia;if(c)return c(b).matches;var d;return y("@media "+b+" { #"+h+" { position: absolute; } }",function(b){d=(a.getComputedStyle?getComputedStyle(b,null):b.currentStyle)["position"]=="absolute"}),d},A=function(){function d(d,e){e=e||b.createElement(a[d]||"div"),d="on"+d;var f=d in e;return f||(e.setAttribute||(e=b.createElement("div")),e.setAttribute&&e.removeAttribute&&(e.setAttribute(d,""),f=F(e[d],"function"),F(e[d],"undefined")||(e[d]=c),e.removeAttribute(d))),e=null,f}var a={select:"input",change:"input",submit:"form",reset:"form",error:"img",load:"img",abort:"img"};return d}(),B={}.hasOwnProperty,C;!F(B,"undefined")&&!F(B.call,"undefined")?C=function(a,b){return B.call(a,b)}:C=function(a,b){return b in a&&F(a.constructor.prototype[b],"undefined")},Function.prototype.bind||(Function.prototype.bind=function(b){var c=this;if(typeof c!="function")throw new TypeError;var d=w.call(arguments,1),e=function(){if(this instanceof e){var a=function(){};a.prototype=c.prototype;var f=new a,g=c.apply(f,d.concat(w.call(arguments)));return Object(g)===g?g:f}return c.apply(b,d.concat(w.call(arguments)))};return e}),s.flexbox=function(){return J("flexWrap")},s.canvas=function(){var a=b.createElement("canvas");return!!a.getContext&&!!a.getContext("2d")},s.canvastext=function(){return!!e.canvas&&!!F(b.createElement("canvas").getContext("2d").fillText,"function")},s.webgl=function(){return!!a.WebGLRenderingContext},s.touch=function(){var c;return"ontouchstart"in a||a.DocumentTouch&&b instanceof DocumentTouch?c=!0:y(["@media (",n.join("touch-enabled),("),h,")","{#modernizr{top:9px;position:absolute}}"].join(""),function(a){c=a.offsetTop===9}),c},s.geolocation=function(){return"geolocation"in navigator},s.postmessage=function(){return!!a.postMessage},s.websqldatabase=function(){return!!a.openDatabase},s.indexedDB=function(){return!!J("indexedDB",a)},s.hashchange=function(){return A("hashchange",a)&&(b.documentMode===c||b.documentMode>7)},s.history=function(){return!!a.history&&!!history.pushState},s.draganddrop=function(){var a=b.createElement("div");return"draggable"in a||"ondragstart"in a&&"ondrop"in 
a},s.websockets=function(){return"WebSocket"in a||"MozWebSocket"in a},s.rgba=function(){return D("background-color:rgba(150,255,150,.5)"),G(j.backgroundColor,"rgba")},s.hsla=function(){return D("background-color:hsla(120,40%,100%,.5)"),G(j.backgroundColor,"rgba")||G(j.backgroundColor,"hsla")},s.multiplebgs=function(){return D("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(j.background)},s.backgroundsize=function(){return J("backgroundSize")},s.borderimage=function(){return J("borderImage")},s.borderradius=function(){return J("borderRadius")},s.boxshadow=function(){return J("boxShadow")},s.textshadow=function(){return b.createElement("div").style.textShadow===""},s.opacity=function(){return E("opacity:.55"),/^0.55$/.test(j.opacity)},s.cssanimations=function(){return J("animationName")},s.csscolumns=function(){return J("columnCount")},s.cssgradients=function(){var a="background-image:",b="gradient(linear,left top,right bottom,from(#9f9),to(white));",c="linear-gradient(left top,#9f9, white);";return D((a+"-webkit- ".split(" ").join(b+a)+n.join(c+a)).slice(0,-a.length)),G(j.backgroundImage,"gradient")},s.cssreflections=function(){return J("boxReflect")},s.csstransforms=function(){return!!J("transform")},s.csstransforms3d=function(){var a=!!J("perspective");return a&&"webkitPerspective"in g.style&&y("@media (transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}",function(b,c){a=b.offsetLeft===9&&b.offsetHeight===3}),a},s.csstransitions=function(){return J("transition")},s.fontface=function(){var a;return y('@font-face {font-family:"font";src:url("https://")}',function(c,d){var e=b.getElementById("smodernizr"),f=e.sheet||e.styleSheet,g=f?f.cssRules&&f.cssRules[0]?f.cssRules[0].cssText:f.cssText||"":"";a=/src/i.test(g)&&g.indexOf(d.split(" ")[0])===0}),a},s.generatedcontent=function(){var a;return y(["#",h,"{font:0/0 a}#",h,':after{content:"',l,'";visibility:hidden;font:3px/1 a}'].join(""),function(b){a=b.offsetHeight>=3}),a},s.video=function(){var a=b.createElement("video"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('video/ogg; codecs="theora"').replace(/^no$/,""),c.h264=a.canPlayType('video/mp4; codecs="avc1.42E01E"').replace(/^no$/,""),c.webm=a.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,"")}catch(d){}return c},s.audio=function(){var a=b.createElement("audio"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,""),c.mp3=a.canPlayType("audio/mpeg;").replace(/^no$/,""),c.wav=a.canPlayType('audio/wav; codecs="1"').replace(/^no$/,""),c.m4a=(a.canPlayType("audio/x-m4a;")||a.canPlayType("audio/aac;")).replace(/^no$/,"")}catch(d){}return c},s.localstorage=function(){try{return localStorage.setItem(h,h),localStorage.removeItem(h),!0}catch(a){return!1}},s.sessionstorage=function(){try{return sessionStorage.setItem(h,h),sessionStorage.removeItem(h),!0}catch(a){return!1}},s.webworkers=function(){return!!a.Worker},s.applicationcache=function(){return!!a.applicationCache},s.svg=function(){return!!b.createElementNS&&!!b.createElementNS(r.svg,"svg").createSVGRect},s.inlinesvg=function(){var a=b.createElement("div");return a.innerHTML="",(a.firstChild&&a.firstChild.namespaceURI)==r.svg},s.smil=function(){return!!b.createElementNS&&/SVGAnimate/.test(m.call(b.createElementNS(r.svg,"animate")))},s.svgclippaths=function(){return!!b.createElementNS&&/SVGClipPath/.test(m.call(b.createElementNS(r.svg,"clipPath")))};for(var L in 
s)C(s,L)&&(x=L.toLowerCase(),e[x]=s[L](),v.push((e[x]?"":"no-")+x));return e.input||K(),e.addTest=function(a,b){if(typeof a=="object")for(var d in a)C(a,d)&&e.addTest(d,a[d]);else{a=a.toLowerCase();if(e[a]!==c)return e;b=typeof b=="function"?b():b,typeof f!="undefined"&&f&&(g.className+=" "+(b?"":"no-")+a),e[a]=b}return e},D(""),i=k=null,function(a,b){function k(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function l(){var a=r.elements;return typeof a=="string"?a.split(" "):a}function m(a){var b=i[a[g]];return b||(b={},h++,a[g]=h,i[h]=b),b}function n(a,c,f){c||(c=b);if(j)return c.createElement(a);f||(f=m(c));var g;return f.cache[a]?g=f.cache[a].cloneNode():e.test(a)?g=(f.cache[a]=f.createElem(a)).cloneNode():g=f.createElem(a),g.canHaveChildren&&!d.test(a)?f.frag.appendChild(g):g}function o(a,c){a||(a=b);if(j)return a.createDocumentFragment();c=c||m(a);var d=c.frag.cloneNode(),e=0,f=l(),g=f.length;for(;e",f="hidden"in a,j=a.childNodes.length==1||function(){b.createElement("a");var a=b.createDocumentFragment();return typeof a.cloneNode=="undefined"||typeof a.createDocumentFragment=="undefined"||typeof a.createElement=="undefined"}()}catch(c){f=!0,j=!0}})();var r={elements:c.elements||"abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time video",shivCSS:c.shivCSS!==!1,supportsUnknownElements:j,shivMethods:c.shivMethods!==!1,type:"default",shivDocument:q,createElement:n,createDocumentFragment:o};a.html5=r,q(b)}(this,b),e._version=d,e._prefixes=n,e._domPrefixes=q,e._cssomPrefixes=p,e.mq=z,e.hasEvent=A,e.testProp=function(a){return H([a])},e.testAllProps=J,e.testStyles=y,e.prefixed=function(a,b,c){return b?J(a,b,c):J(a,"pfx")},g.className=g.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(f?" 
js "+v.join(" "):""),e}(this,this.document),function(a,b,c){function d(a){return"[object Function]"==o.call(a)}function e(a){return"string"==typeof a}function f(){}function g(a){return!a||"loaded"==a||"complete"==a||"uninitialized"==a}function h(){var a=p.shift();q=1,a?a.t?m(function(){("c"==a.t?B.injectCss:B.injectJs)(a.s,0,a.a,a.x,a.e,1)},0):(a(),h()):q=0}function i(a,c,d,e,f,i,j){function k(b){if(!o&&g(l.readyState)&&(u.r=o=1,!q&&h(),l.onload=l.onreadystatechange=null,b)){"img"!=a&&m(function(){t.removeChild(l)},50);for(var d in y[c])y[c].hasOwnProperty(d)&&y[c][d].onload()}}var j=j||B.errorTimeout,l=b.createElement(a),o=0,r=0,u={t:d,s:c,e:f,a:i,x:j};1===y[c]&&(r=1,y[c]=[]),"object"==a?l.data=c:(l.src=c,l.type=a),l.width=l.height="0",l.onerror=l.onload=l.onreadystatechange=function(){k.call(this,r)},p.splice(e,0,u),"img"!=a&&(r||2===y[c]?(t.insertBefore(l,s?null:n),m(k,j)):y[c].push(l))}function j(a,b,c,d,f){return q=0,b=b||"j",e(a)?i("c"==b?v:u,a,b,this.i++,c,d,f):(p.splice(this.i++,0,a),1==p.length&&h()),this}function k(){var a=B;return a.loader={load:j,i:0},a}var l=b.documentElement,m=a.setTimeout,n=b.getElementsByTagName("script")[0],o={}.toString,p=[],q=0,r="MozAppearance"in l.style,s=r&&!!b.createRange().compareNode,t=s?l:n.parentNode,l=a.opera&&"[object Opera]"==o.call(a.opera),l=!!b.attachEvent&&!l,u=r?"object":l?"script":"img",v=l?"script":u,w=Array.isArray||function(a){return"[object Array]"==o.call(a)},x=[],y={},z={timeout:function(a,b){return b.length&&(a.timeout=b[0]),a}},A,B;B=function(a){function b(a){var a=a.split("!"),b=x.length,c=a.pop(),d=a.length,c={url:c,origUrl:c,prefixes:a},e,f,g;for(f=0;f -div.stButton > button:first-child { - background-color: rgb(255, 75, 75); - color: rgb(255, 255, 255); -} -div.stButton > button:hover { - background-color: rgb(255, 75, 75); - color: rgb(255, 255, 255); -} -div.stButton > button:active { - background-color: rgb(255, 75, 75); - color: rgb(255, 255, 255); -} -div.stButton > button:focus { - background-color: rgb(255, 75, 75); - color: rgb(255, 255, 255); -} -.css-1cpxqw2:focus:not(:active) { - background-color: rgb(255, 75, 75); - border-color: rgb(255, 75, 75); - color: rgb(255, 255, 255); -} -""" - -style = """ - -""" - - -def apply_prod_style(st): - return st.markdown(style, unsafe_allow_html=True) \ No newline at end of file diff --git a/spaces/nanom/syntactic_tree/README.md b/spaces/nanom/syntactic_tree/README.md deleted file mode 100644 index ace81fbbb1d7a7387323226084f7fb67190e34e7..0000000000000000000000000000000000000000 --- a/spaces/nanom/syntactic_tree/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Syntactic Tree Generator -emoji: 🌳🌳🌳 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.16 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ner4archives/ner4archives-NEL-vizualizer-app/app.py b/spaces/ner4archives/ner4archives-NEL-vizualizer-app/app.py deleted file mode 100644 index 4240148f0f48cba8d10e0cf5b28ae5502a7a2c8e..0000000000000000000000000000000000000000 --- a/spaces/ner4archives/ner4archives-NEL-vizualizer-app/app.py +++ /dev/null @@ -1,208 +0,0 @@ -import re -import json - -import streamlit -import spacy_streamlit -import spacy -from lxml import etree -import pandas as pd - -streamlit.set_page_config(layout="wide") - -samples_test = {"FRAN_IR_050370.xml": "./samples/FRAN_IR_050370.xml"} - -with open('config.json', mode="r") as json_file: - CONFIGURATION = json.loads(json_file.read()) - - 
- -# TITLE APP -streamlit.title("NER4Archives visualizer") -streamlit.sidebar.title("NER4Archives visualizer") -streamlit.sidebar.write("## Motivation") -streamlit.sidebar.markdown("""
-

This application is a proof-of-concept to apply a text classification task (also called Named-Entity Recognition) on -XML EAD finding aids and to evaluate the NER predictions.

- -

In the context of the NER4Archives project (INRIA-ALMAnaCH/Archives nationales), the goal is to train NER models on an annotated dataset -extracted from XML EAD finding aids and test them on new data.

- -

Most of the models available here are trained with the spaCy NLP -framework and are available on the HF organisation hub. -Other models may be added in the future.

- -

The project also includes a downstream entity linking task. The spaCy fishing extension (based on entity-fishing) is used here to support this purpose.

- -NER4Archives - 2022
-""", unsafe_allow_html=True) - -scol1, scol2 = streamlit.sidebar.columns(2) -scol1.image("./assets/an.png", width=170) -scol2.image("./assets/almanach_rouge-inria.png", width=100) - -flag_file = False - -# 1. User provides a XML EAD -streamlit.write("## 📄 Input XML EAD:") -filename = streamlit.file_uploader("Upload an XML EAD", type="xml") -streamlit.markdown("or use an XML EAD provided in [`samples/`](https://huggingface.co/spaces/ner4archives/ner4archives-NEL-vizualizer-app/blob/main/samples/) directory") -data = "" -flag_model = False - -if filename is not None: - data = filename.getvalue().decode("utf-8").encode("utf-8") - if len(data) > 0: - flag_file = True -def ead_strategy(tree): - # create a container for sentences and dids - # elements - sentences = [] - container_dids = [] - # get the level - dsc = tree.xpath('.//dsc') - for chlidren_dsc in dsc: - # get levels - for did in chlidren_dsc.xpath('.//did'): - container_dids.append(did) - text = "" - if did is not None: - text += " ".join( - [did_content.strip() for did_content in did.itertext() if len(did_content) > 0]) - # get the scopecontent if exists and concatenate with the rest - if did.getnext() is not None: - text += " ".join( - [" ".join(scopecontent.strip().split()) for scopecontent in did.getnext().itertext() if - len(scopecontent) > 0]) - sentences.append(" " + re.sub(r"\s{2,}", " ", text.strip()) + " ") - # assert len(sentences) == len(container_dids) - return container_dids, sentences - -model = "" -linking = True -flag_view = False -if flag_file: - col1, col2 = streamlit.columns(2) - col1.write("## 👁️ XML tree view:") - col2.write("## 👁️ Plain text view:") - parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8') - tree = etree.fromstring(data, parser=parser) - xml = etree.tostring(tree, pretty_print=True, encoding="utf-8").decode("utf-8") - col1.text_area("", value=xml, height=500, disabled=True) - dids, sentences = ead_strategy(tree) - plain = "\n".join(sentences) - col2.text_area("", value=plain, height=500, disabled=True) - flag_view = True - -if flag_view: - streamlit.write("## ⚙️ Configure NER model and options:") - models = [] - for pipe in spacy.info()["pipelines"]: - models.append(pipe) - option = streamlit.selectbox( - 'Choose a NER model you want to apply in the list: ', - models) - model = option - if model != "": - flag_model = True - #linking = streamlit.checkbox('Check to apply named entity linking (entity-fishing component)', value=True) - #linkingicon = "✅️" - #if linking is False: - # linkingicon = "❌" - linking = False - streamlit.write("#### Actual Parameters:") - #streamlit.write(f'- NER model selected: {option}\n - linking: {linkingicon}') - streamlit.write(f'- NER model selected: {option}\n') - -entities = [] -docs = [] -ents = [] -flag_vizualize = False - -# Launch NER process: -if flag_model: - if streamlit.button('Launch'): - plain = "\n".join(sentences) - with streamlit.spinner('Initialize NER...'): - nlp = spacy.load(model) - nlp.max_length = 5000000 - if linking: - nlp.add_pipe('entityfishing', config={"language": "fr", "api_ef_base": CONFIGURATION['ef_endpoint']}) - - with streamlit.spinner('NER processing...'): - if linking: - start_sentence = 0 - for doc in nlp.pipe(sentences, batch_size=250): - end_sentence = start_sentence + len(doc.text) + 1 - for ent in doc.ents: - start_tok = start_sentence + ent.start_char - end_tok = start_tok + len(ent.text) - entities.append(( - start_tok, - end_tok, - ent.text, - ent.label_, - ent._.kb_qid, - ent._.url_wikidata, - 
ent._.nerd_score - )) - start_sentence = end_sentence - else: - start_sentence = 0 - for doc in nlp.pipe(sentences): - end_sentence = start_sentence + len(doc.text) + 1 - for ent in doc.ents: - start_tok = start_sentence + ent.start_char - end_tok = start_tok + len(ent.text) - entities.append((start_tok, - end_tok, - ent.text, - ent.label_, - "", - "", - "" - )) - start_sentence = end_sentence - - - streamlit.success('😃 NER applied with success!') - - - df = pd.DataFrame(entities, columns=['START', - 'END', - 'MENTION', - 'NER LABEL', - 'QID', - 'WIKIDATA RESSOURCE (wikidata disambiguation)', - 'LINKING SCORE' - ]) - - streamlit.write("## 🔎 Explore named entities in table: ") - streamlit.write(df) - - - streamlit.write("## 🔎 Explore named entities in text: ") - spacy_streamlit.visualize_ner( - {"text": plain, - "ents": [{"start": ent[0], - "end": ent[1], - "label": ent[3], - "kb_id": ent[4] if linking else "", - "kb_url": ent[5] if linking else "" - } for ent in entities]}, - labels=["EVENT", "LOCATION", "ORGANISATION", "PERSON", "TITLE", 'LOC', 'MISC', 'ORG', 'PER'], - show_table=False, - manual=True, - title="", - displacy_options={ - "colors": { - "EVENT": "#ec7063", - "LOCATION": "#45b39d", - "ORGANISATION": "#f39c12", - "PERSON": "#3498db", - "TITLE": "#a569bd ", - "LOC": "#45b39d", - "MISC": "#ec7063", - "ORG": "#f39c12", - "PER": "#3498db" - } - }) \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Fritzbox Fon Wlan 7390840491image.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Fritzbox Fon Wlan 7390840491image.md deleted file mode 100644 index 83978a3396431893466eb9e199ee295536a4c9b4..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Fritzbox Fon Wlan 7390840491image.md +++ /dev/null @@ -1,38 +0,0 @@ - -

How to Set Up and Use Your Fritzbox Fon Wlan 7390 Router

-

If you are looking for a router that can do it all, you might want to consider the Fritzbox Fon Wlan 7390. This router is not only a powerful ADSL2+ modem, but also a VoIP-enabled device that can connect your landline and internet phones, a DECT base station that can support up to six cordless phones, a dual-band wireless access point that can deliver fast and reliable Wi-Fi, and a network hub that can share USB devices, such as storage, printers, and 3G dongles.

-

In this article, we will show you how to set up and use your Fritzbox Fon Wlan 7390 router, and explore some of its amazing features.

-

Fritzbox Fon Wlan 7390840491image


Download Zip ->->->-> https://urlcod.com/2uIazu



-

Setting Up Your Fritzbox Fon Wlan 7390 Router

-

Before you start, make sure you have the following items:

-
    -
  • Your Fritzbox Fon Wlan 7390 router
  • -
  • An Ethernet cable
  • -
  • A phone cable
  • -
  • A PC range ADSL/phone splitter
  • -
  • An RJ45 Y cable
  • -
  • Two RJ45 > RJ12 converters
  • -
  • Your internet service provider (ISP) details
  • -
  • Your landline phone number and password (if you have VoIP service)
  • -
-

Follow these steps to set up your router:

-
    -
  1. Connect the phone cable to the DSL/TEL port on the back of the router, and to the ADSL port on the splitter. Connect your landline phone to the PHONE port on the splitter.
  2. -
  3. Connect the Ethernet cable to the LAN 1 port on the back of the router, and to your computer's Ethernet port.
  4. -
  5. Connect the power adapter to the power socket on the back of the router, and to an electrical outlet. Wait for the Power/DSL LED to light up.
  6. -
  7. Open a web browser on your computer, and enter http://fritz.box in the address bar. You will see the Fritzbox user interface.
  8. -
  9. Click on "Start Wizard" and follow the instructions to configure your internet connection. You will need to enter your ISP details, such as username and password.
  10. -
  11. If you have VoIP service, click on "Telephony" in the user interface, and then on "Telephony Devices". Click on "New Device" and follow the instructions to set up your internet phone. You will need to enter your landline phone number and password.
  12. -
  13. If you want to use wireless devices, click on "Wi-Fi" in the user interface, and then on "Wi-Fi Network". Make note of the name (SSID) and network key of your Wi-Fi network. You can also change these settings if you want.
  14. -
  15. Connect your wireless devices to your Wi-Fi network by entering the network key or using WPS (if supported).
  16. -
-

Congratulations! You have successfully set up your Fritzbox Fon Wlan 7390 router.

-

Using Your Fritzbox Fon Wlan 7390 Router

-

Your Fritzbox Fon Wlan 7390 router has many features that you can use to enhance your home networking experience. Here are some of them:

-
    -
  • You can connect up to six cordless phones to your router using DECT technology. To do this, press and hold the DECT button on the router until it flashes, then press and hold the registration button on your phone until it beeps. You can then make and receive calls using your landline or internet phone service.
  • -
  • You can share USB devices, such as storage, printers, and 3G dongles, with other devices on your network. To do this, connect your USB device to one of the USB ports on the router, and then access it from your computer or smartphone using the Fritzbox user interface or Fritzbox app.
  • -
  • You can use IPv6 protocol to access websites that support it. To do this, click on "Internet" in the user interface, and then on "Account Information". Click on "IPv6" and enable it. You can also configure other IPv6 settings if you want.
  • -
  • You can monitor your internet usage and set quotas for different devices. To do this, click on "Internet" in the user interface, and then on "Online Monitor". You can see how much data each device has used, and set limits for them.

    7b8c122e87
    -
    -
    \ No newline at end of file diff --git a/spaces/neuesql/sqlgptapp/README.md b/spaces/neuesql/sqlgptapp/README.md deleted file mode 100644 index 196a1d3c1f060dfa5118383f1760b83d5b7ae81b..0000000000000000000000000000000000000000 --- a/spaces/neuesql/sqlgptapp/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sqlgpt -emoji: 🦀 -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nightfury/SD_Text-2-Image/README.md b/spaces/nightfury/SD_Text-2-Image/README.md deleted file mode 100644 index 87bfd01dca6eec94a598f724b1898d1dbe664880..0000000000000000000000000000000000000000 --- a/spaces/nightfury/SD_Text-2-Image/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: SD Text-2-Image -emoji: 📜➡️🖼️ -colorFrom: orange -colorTo: purple -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nomic-ai/Anthropic_hh-rlhf/index.html b/spaces/nomic-ai/Anthropic_hh-rlhf/index.html deleted file mode 100644 index 5f3c8d084c3906e0599bd28b5338f087094521a1..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/Anthropic_hh-rlhf/index.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - Anthropic/hh-rlhf - - - - -
    - -
    - - - \ No newline at end of file diff --git a/spaces/nomic-ai/amazon_reviews_multi/index.html b/spaces/nomic-ai/amazon_reviews_multi/index.html deleted file mode 100644 index 496dc5821850d76428bf9385b9647838205ed1b5..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/amazon_reviews_multi/index.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - amazon_reviews_multi - - - - -
    - -
    - - - \ No newline at end of file diff --git a/spaces/oliver2023/chatgpt-on-wechat/channel/wechatmp/wechatmp_channel.py b/spaces/oliver2023/chatgpt-on-wechat/channel/wechatmp/wechatmp_channel.py deleted file mode 100644 index c7f7142d5ee0352742b270384e057c08d2ab606d..0000000000000000000000000000000000000000 --- a/spaces/oliver2023/chatgpt-on-wechat/channel/wechatmp/wechatmp_channel.py +++ /dev/null @@ -1,234 +0,0 @@ -# -*- coding: utf-8 -*- -import web -import time -import math -import hashlib -import textwrap -from channel.chat_channel import ChatChannel -import channel.wechatmp.reply as reply -import channel.wechatmp.receive as receive -from common.singleton import singleton -from common.log import logger -from config import conf -from bridge.reply import * -from bridge.context import * -from plugins import * -import traceback - -# If using SSL, uncomment the following lines, and modify the certificate path. -# from cheroot.server import HTTPServer -# from cheroot.ssl.builtin import BuiltinSSLAdapter -# HTTPServer.ssl_adapter = BuiltinSSLAdapter( -# certificate='/ssl/cert.pem', -# private_key='/ssl/cert.key') - - -# from concurrent.futures import ThreadPoolExecutor -# thread_pool = ThreadPoolExecutor(max_workers=8) - -@singleton -class WechatMPChannel(ChatChannel): - NOT_SUPPORT_REPLYTYPE = [ReplyType.IMAGE, ReplyType.VOICE] - def __init__(self): - super().__init__() - self.cache_dict = dict() - self.query1 = dict() - self.query2 = dict() - self.query3 = dict() - - - def startup(self): - urls = ( - '/wx', 'SubsribeAccountQuery', - ) - app = web.application(urls, globals()) - port = conf().get('wechatmp_port', 8080) - web.httpserver.runsimple(app.wsgifunc(), ('0.0.0.0', port)) - - - def send(self, reply: Reply, context: Context): - reply_cnt = math.ceil(len(reply.content) / 600) - receiver = context["receiver"] - self.cache_dict[receiver] = (reply_cnt, reply.content) - logger.debug("[send] reply to {} saved to cache: {}".format(receiver, reply)) - - -def verify_server(): - try: - data = web.input() - if len(data) == 0: - return "None" - signature = data.signature - timestamp = data.timestamp - nonce = data.nonce - echostr = data.echostr - token = conf().get('wechatmp_token') #请按照公众平台官网\基本配置中信息填写 - - data_list = [token, timestamp, nonce] - data_list.sort() - sha1 = hashlib.sha1() - # map(sha1.update, data_list) #python2 - sha1.update("".join(data_list).encode('utf-8')) - hashcode = sha1.hexdigest() - print("handle/GET func: hashcode, signature: ", hashcode, signature) - if hashcode == signature: - return echostr - else: - return "" - except Exception as Argument: - return Argument - - -# This class is instantiated once per query -class SubsribeAccountQuery(): - - def GET(self): - return verify_server() - - def POST(self): - channel_instance = WechatMPChannel() - try: - query_time = time.time() - webData = web.data() - # logger.debug("[wechatmp] Receive request:\n" + webData.decode("utf-8")) - wechat_msg = receive.parse_xml(webData) - if wechat_msg.msg_type == 'text': - from_user = wechat_msg.from_user_id - to_user = wechat_msg.to_user_id - message = wechat_msg.content.decode("utf-8") - message_id = wechat_msg.msg_id - - logger.info("[wechatmp] {}:{} Receive post query {} {}: {}".format(web.ctx.env.get('REMOTE_ADDR'), web.ctx.env.get('REMOTE_PORT'), from_user, message_id, message)) - - cache_key = from_user - cache = channel_instance.cache_dict.get(cache_key) - - reply_text = "" - # New request - if cache == None: - # The first query begin, reset the cache - context = 
channel_instance._compose_context(ContextType.TEXT, message, isgroup=False, msg=wechat_msg) - logger.debug("[wechatmp] context: {} {}".format(context, wechat_msg)) - if context: - # set private openai_api_key - # if from_user is not changed in itchat, this can be placed at chat_channel - user_data = conf().get_user_data(from_user) - context['openai_api_key'] = user_data.get('openai_api_key') # None or user openai_api_key - channel_instance.cache_dict[cache_key] = (0, "") - channel_instance.produce(context) - else: - trigger_prefix = conf().get('single_chat_prefix',[''])[0] - if trigger_prefix: - content = textwrap.dedent(f"""\ - 请输入'{trigger_prefix}'接你想说的话跟我说话。 - 例如: - {trigger_prefix}你好,很高兴见到你。""") - else: - logger.error(f"[wechatmp] unknown error") - content = textwrap.dedent("""\ - 未知错误,请稍后再试""") - replyMsg = reply.TextMsg(wechat_msg.from_user_id, wechat_msg.to_user_id, content) - return replyMsg.send() - channel_instance.query1[cache_key] = False - channel_instance.query2[cache_key] = False - channel_instance.query3[cache_key] = False - # Request again - elif cache[0] == 0 and channel_instance.query1.get(cache_key) == True and channel_instance.query2.get(cache_key) == True and channel_instance.query3.get(cache_key) == True: - channel_instance.query1[cache_key] = False #To improve waiting experience, this can be set to True. - channel_instance.query2[cache_key] = False #To improve waiting experience, this can be set to True. - channel_instance.query3[cache_key] = False - elif cache[0] >= 1: - # Skip the waiting phase - channel_instance.query1[cache_key] = True - channel_instance.query2[cache_key] = True - channel_instance.query3[cache_key] = True - - - cache = channel_instance.cache_dict.get(cache_key) - if channel_instance.query1.get(cache_key) == False: - # The first query from wechat official server - logger.debug("[wechatmp] query1 {}".format(cache_key)) - channel_instance.query1[cache_key] = True - cnt = 0 - while cache[0] == 0 and cnt < 45: - cnt = cnt + 1 - time.sleep(0.1) - cache = channel_instance.cache_dict.get(cache_key) - if cnt == 45: - # waiting for timeout (the POST query will be closed by wechat official server) - time.sleep(5) - # and do nothing - return - else: - pass - elif channel_instance.query2.get(cache_key) == False: - # The second query from wechat official server - logger.debug("[wechatmp] query2 {}".format(cache_key)) - channel_instance.query2[cache_key] = True - cnt = 0 - while cache[0] == 0 and cnt < 45: - cnt = cnt + 1 - time.sleep(0.1) - cache = channel_instance.cache_dict.get(cache_key) - if cnt == 45: - # waiting for timeout (the POST query will be closed by wechat official server) - time.sleep(5) - # and do nothing - return - else: - pass - elif channel_instance.query3.get(cache_key) == False: - # The third query from wechat official server - logger.debug("[wechatmp] query3 {}".format(cache_key)) - channel_instance.query3[cache_key] = True - cnt = 0 - while cache[0] == 0 and cnt < 40: - cnt = cnt + 1 - time.sleep(0.1) - cache = channel_instance.cache_dict.get(cache_key) - if cnt == 40: - # Have waiting for 3x5 seconds - # return timeout message - reply_text = "【正在思考中,回复任意文字尝试获取回复】" - logger.info("[wechatmp] Three queries has finished For {}: {}".format(from_user, message_id)) - replyPost = reply.TextMsg(from_user, to_user, reply_text).send() - return replyPost - else: - pass - - if float(time.time()) - float(query_time) > 4.8: - logger.info("[wechatmp] Timeout for {} {}".format(from_user, message_id)) - return - - - if cache[0] > 1: - reply_text = 
cache[1][:600] + "\n【未完待续,回复任意文字以继续】" #wechatmp auto_reply length limit - channel_instance.cache_dict[cache_key] = (cache[0] - 1, cache[1][600:]) - elif cache[0] == 1: - reply_text = cache[1] - channel_instance.cache_dict.pop(cache_key) - logger.info("[wechatmp] {}:{} Do send {}".format(web.ctx.env.get('REMOTE_ADDR'), web.ctx.env.get('REMOTE_PORT'), reply_text)) - replyPost = reply.TextMsg(from_user, to_user, reply_text).send() - return replyPost - - elif wechat_msg.msg_type == 'event': - logger.info("[wechatmp] Event {} from {}".format(wechat_msg.Event, wechat_msg.from_user_id)) - trigger_prefix = conf().get('single_chat_prefix',[''])[0] - content = textwrap.dedent(f"""\ - 感谢您的关注! - 这里是ChatGPT,可以自由对话。 - 资源有限,回复较慢,请勿着急。 - 支持通用表情输入。 - 暂时不支持图片输入。 - 支持图片输出,画字开头的问题将回复图片链接。 - 支持角色扮演和文字冒险两种定制模式对话。 - 输入'{trigger_prefix}#帮助' 查看详细指令。""") - replyMsg = reply.TextMsg(wechat_msg.from_user_id, wechat_msg.to_user_id, content) - return replyMsg.send() - else: - logger.info("暂且不处理") - return "success" - except Exception as exc: - logger.exception(exc) - return exc - diff --git a/spaces/patgpt4/MusicGen/MODEL_CARD.md b/spaces/patgpt4/MusicGen/MODEL_CARD.md deleted file mode 100644 index 6c2c9f883969eb905e74ad3376966d156cc5ca00..0000000000000000000000000000000000000000 --- a/spaces/patgpt4/MusicGen/MODEL_CARD.md +++ /dev/null @@ -1,81 +0,0 @@ -# MusicGen Model Card - -## Model details - -**Organization developing the model:** The FAIR team of Meta AI. - -**Model date:** MusicGen was trained between April 2023 and May 2023. - -**Model version:** This is the version 1 of the model. - -**Model type:** MusicGen consists of an EnCodec model for audio tokenization, an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters ; and two variants: a model trained for text-to-music generation task and a model trained for melody-guided music generation. - -**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation][arxiv]. - -**Citation details** See [our paper][arxiv] - -**License** Code is released under MIT, model weights are released under CC-BY-NC 4.0. - -**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [Github repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue. - -## Intended use -**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including: - -- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science -- Generation of music guided by text or melody to understand current abilities of generative AI models by machine learning amateurs - -**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateur seeking to better understand those models. - -**Out-of-scope use cases** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. 
- -## Metrics - -**Models performance measures:** We used the following objective measure to evaluate the model on a standard music benchmark: - -- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish) -- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST) -- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model - -Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes: - -- Overall quality of the music samples; -- Text relevance to the provided text input; -- Adherence to the melody for melody-guided music generation. - -More details on performance measures and human studies can be found in the paper. - -**Decision thresholds:** Not applicable. - -## Evaluation datasets - -The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set. - -## Training datasets - -The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing. - -## Quantitative analysis - -More information can be found in the paper [Simple and Controllable Music Generation][arxiv], in the Experimental Setup section. - -## Limitations and biases - -**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data, we believe that scaling the model on larger datasets can further improve the performance of the model. - -**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then using using a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs). - -**Limitations:** - -- The model is not able to generate realistic vocals. -- The model has been trained with English descriptions and will not perform as well in other languages. -- The model does not perform equally well for all music styles and cultures. -- The model sometimes generates end of songs, collapsing to silence. -- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results. - -**Biases:** The source of data is potentially lacking diversity and all music cultures are not equally represented in the dataset. The model may not perform equally well on the wide variety of music genres that exists. The generated samples from the model will reflect the biases from the training data. Further work on this model should include methods for balanced and just representations of cultures, for example, by scaling the training data to be both diverse and inclusive. - -**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered as biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will allow to broaden the application to new and more representative data. 
- -**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks. - -[arxiv]: https://arxiv.org/abs/2306.05284 diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/VoucherVision_Config_Builder.py b/spaces/phyloforfun/VoucherVision/vouchervision/VoucherVision_Config_Builder.py deleted file mode 100644 index 9a60fce402258ed5feccbc3a621f627279fe98ed..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/VoucherVision_Config_Builder.py +++ /dev/null @@ -1,607 +0,0 @@ -import os, yaml, platform, traceback -from vouchervision.LeafMachine2_Config_Builder import get_default_download_folder, write_config_file -from vouchervision.general_utils import validate_dir, print_main_fail -from vouchervision.vouchervision_main import voucher_vision -from general_utils import get_cfg_from_full_path - -def build_VV_config(): - ############################################# - ############ Set common defaults ############ - ############################################# - # Changing the values below will set new - # default values each time you open the - # VoucherVision user interface - ############################################# - ############################################# - ############################################# - - dir_home = os.path.dirname(os.path.dirname(__file__)) - run_name = 'test' - # dir_images_local = 'D:/Dropbox/LM2_Env/Image_Datasets/GBIF_BroadSample_3SppPerFamily1' - dir_images_local = os.path.join(dir_home,'demo','demo_images') - - # The default output location is the computer's "Downloads" folder - # You can set dir_output directly by typing the folder path, - # OR you can uncomment the line "dir_output = default_output_folder" - # to have VoucherVision save to the Downloads folder by default - default_output_folder = get_default_download_folder() - dir_output = default_output_folder - # dir_output = 'D:/D_Desktop/LM2' - - prefix_removal = '' #'MICH-V-' - suffix_removal = '' - catalog_numerical_only = False - - LLM_version_user = 'Azure GPT 4' - prompt_version = 'Version 2' # from ["Version 1", "Version 1 No Domain Knowledge", "Version 2"] - use_LeafMachine2_collage_images = False # Use LeafMachine2 collage images - do_create_OCR_helper_image = False - - batch_size = 500 - - path_domain_knowledge = os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx') - embeddings_database_name = os.path.splitext(os.path.basename(path_domain_knowledge))[0] - - ############################################# - ############################################# - ########## DO NOT EDIT BELOW HERE ########### - ############################################# - ############################################# - return assemble_config(dir_home, run_name, dir_images_local,dir_output, - prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size, - path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images, - prompt_version, do_create_OCR_helper_image, use_domain_knowledge=False) - -def assemble_config(dir_home, run_name, dir_images_local,dir_output, - prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size, - path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images, - prompt_version, do_create_OCR_helper_image_user, 
use_domain_knowledge=False): - - - # Initialize the base structure - config_data = { - 'leafmachine': {} - } - - # Modular sections to be added to 'leafmachine' - do_section = { - 'check_for_illegal_filenames': False, - 'check_for_corrupt_images_make_vertical': True, - } - - print_section = { - 'verbose': True, - 'optional_warnings': True - } - - logging_section = { - 'log_level': None - } - - - project_section = { - 'dir_output': dir_output, - 'run_name': run_name, - 'image_location': 'local', - 'batch_size': batch_size, - 'num_workers': 1, - 'dir_images_local': dir_images_local, - 'continue_run_from_partial_xlsx': '', - 'prefix_removal': prefix_removal, - 'suffix_removal': suffix_removal, - 'catalog_numerical_only': catalog_numerical_only, - 'use_domain_knowledge': use_domain_knowledge, - 'embeddings_database_name': embeddings_database_name, - 'build_new_embeddings_database': False, - 'path_to_domain_knowledge_xlsx': path_domain_knowledge, - 'prompt_version': prompt_version, - 'delete_all_temps': False, - 'delete_temps_keep_VVE': False, - } - - modules_section = { - 'specimen_crop': True - } - - LLM_version = LLM_version_user - use_RGB_label_images = use_LeafMachine2_collage_images # Use LeafMachine2 collage images - do_create_OCR_helper_image = do_create_OCR_helper_image_user - - cropped_components_section = { - 'do_save_cropped_annotations': True, - 'save_cropped_annotations': ['label','barcode'], - 'save_per_image': False, - 'save_per_annotation_class': True, - 'binarize_labels': False, - 'binarize_labels_skeletonize': False - } - - data_section = { - 'save_json_rulers': False, - 'save_json_measurements': False, - 'save_individual_csv_files_rulers': False, - 'save_individual_csv_files_measurements': False, - 'save_individual_csv_files_landmarks': False, - 'save_individual_efd_files': False, - 'include_darwin_core_data_from_combined_file': False, - 'do_apply_conversion_factor': False - } - - overlay_section = { - 'save_overlay_to_pdf': False, - 'save_overlay_to_jpgs': True, - 'overlay_dpi': 300, # Between 100 to 300 - 'overlay_background_color': 'black', # Either 'white' or 'black' - - 'show_archival_detections': True, - 'show_plant_detections': True, - 'show_segmentations': True, - 'show_landmarks': True, - 'ignore_archival_detections_classes': [], - 'ignore_plant_detections_classes': ['leaf_whole', 'specimen'], # Could also include 'leaf_partial' and others if needed - 'ignore_landmark_classes': [], - - 'line_width_archival': 12, # Previous value given was 2 - 'line_width_plant': 12, # Previous value given was 6 - 'line_width_seg': 12, # 12 is specified as "thick" - 'line_width_efd': 12, # 3 is specified as "thick" but 12 is given here - 'alpha_transparency_archival': 0.3, - 'alpha_transparency_plant': 0, - 'alpha_transparency_seg_whole_leaf': 0.4, - 'alpha_transparency_seg_partial_leaf': 0.3 - } - - archival_component_detector_section = { - 'detector_type': 'Archival_Detector', - 'detector_version': 'PREP_final', - 'detector_iteration': 'PREP_final', - 'detector_weights': 'best.pt', - 'minimum_confidence_threshold': 0.5, # Default is 0.5 - 'do_save_prediction_overlay_images': True, - 'ignore_objects_for_overlay': [] - } - - # Add the sections to the 'leafmachine' key - config_data['leafmachine']['do'] = do_section - config_data['leafmachine']['print'] = print_section - config_data['leafmachine']['logging'] = logging_section - config_data['leafmachine']['project'] = project_section - config_data['leafmachine']['LLM_version'] = LLM_version - 
config_data['leafmachine']['use_RGB_label_images'] = use_RGB_label_images - config_data['leafmachine']['do_create_OCR_helper_image'] = do_create_OCR_helper_image - config_data['leafmachine']['cropped_components'] = cropped_components_section - config_data['leafmachine']['modules'] = modules_section - config_data['leafmachine']['data'] = data_section - config_data['leafmachine']['overlay'] = overlay_section - config_data['leafmachine']['archival_component_detector'] = archival_component_detector_section - - return config_data, dir_home - -def build_api_tests(api): - dir_home = os.path.dirname(os.path.dirname(__file__)) - path_to_configs = os.path.join(dir_home,'demo','demo_configs') - - dir_home = os.path.dirname(os.path.dirname(__file__)) - dir_images_local = os.path.join(dir_home,'demo','demo_images') - validate_dir(os.path.join(dir_home,'demo','demo_configs')) - path_domain_knowledge = os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx') - embeddings_database_name = os.path.splitext(os.path.basename(path_domain_knowledge))[0] - prefix_removal = '' - suffix_removal = '' - catalog_numerical_only = False - batch_size = 500 - do_create_OCR_helper_image = False - - - # ### Option 1: "GPT 4" of ["GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"] - # LLM_version_user = 'Azure GPT 4' - - # ### Option 2: False of [False, True] - # use_LeafMachine2_collage_images = False - - # ### Option 3: False of [False, True] - # use_domain_knowledge = True - - test_results = {} - if api == 'openai': - OPT1, OPT2, OPT3 = TestOptionsAPI_openai.get_options() - elif api == 'palm': - OPT1, OPT2, OPT3 = TestOptionsAPI_palm.get_options() - elif api == 'azure_openai': - OPT1, OPT2, OPT3 = TestOptionsAPI_azure_openai.get_options() - else: - raise - - ind = -1 - ind_opt1 = -1 - ind_opt2 = -1 - ind_opt3 = -1 - - for opt1 in OPT1: - ind_opt1+= 1 - for opt2 in OPT2: - ind_opt2 += 1 - for opt3 in OPT3: - ind += 1 - ind_opt3 += 1 - - LLM_version_user = opt1 - use_LeafMachine2_collage_images = opt2 - prompt_version = opt3 - - filename = f"{ind}__OPT1-{ind_opt1}__OPT2-{ind_opt2}__OPT3-{ind_opt3}.yaml" - run_name = f"{ind}__OPT1-{ind_opt1}__OPT2-{ind_opt2}__OPT3-{ind_opt3}" - - dir_output = os.path.join(dir_home,'demo','demo_output','run_name') - validate_dir(dir_output) - - config_data, dir_home = assemble_config(dir_home, run_name, dir_images_local,dir_output, - prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size, - path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images, - prompt_version, do_create_OCR_helper_image) - - write_config_file(config_data, os.path.join(dir_home,'demo','demo_configs'),filename=filename) - - test_results[run_name] = False - ind_opt3 = -1 - ind_opt2 = -1 - ind_opt1 = -1 - - return dir_home, path_to_configs, test_results - -def build_demo_tests(llm_version): - dir_home = os.path.dirname(os.path.dirname(__file__)) - path_to_configs = os.path.join(dir_home,'demo','demo_configs') - - dir_home = os.path.dirname(os.path.dirname(__file__)) - dir_images_local = os.path.join(dir_home,'demo','demo_images') - validate_dir(os.path.join(dir_home,'demo','demo_configs')) - path_domain_knowledge = os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx') - embeddings_database_name = os.path.splitext(os.path.basename(path_domain_knowledge))[0] - prefix_removal = '' - suffix_removal = '' - catalog_numerical_only = False - batch_size = 500 - do_create_OCR_helper_image = False - - - # ### Option 1: "GPT 4" of 
["GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"] - # LLM_version_user = 'Azure GPT 4' - - # ### Option 2: False of [False, True] - # use_LeafMachine2_collage_images = False - - # ### Option 3: False of [False, True] - # use_domain_knowledge = True - - test_results = {} - if llm_version == 'gpt': - OPT1, OPT2, OPT3 = TestOptionsGPT.get_options() - elif llm_version == 'palm': - OPT1, OPT2, OPT3 = TestOptionsPalm.get_options() - else: - raise - - ind = -1 - ind_opt1 = -1 - ind_opt2 = -1 - ind_opt3 = -1 - - for opt1 in OPT1: - ind_opt1+= 1 - for opt2 in OPT2: - ind_opt2 += 1 - for opt3 in OPT3: - ind += 1 - ind_opt3 += 1 - - LLM_version_user = opt1 - use_LeafMachine2_collage_images = opt2 - prompt_version = opt3 - - filename = f"{ind}__OPT1-{ind_opt1}__OPT2-{ind_opt2}__OPT3-{ind_opt3}.yaml" - run_name = f"{ind}__OPT1-{ind_opt1}__OPT2-{ind_opt2}__OPT3-{ind_opt3}" - - dir_output = os.path.join(dir_home,'demo','demo_output','run_name') - validate_dir(dir_output) - - - if llm_version == 'gpt': - if prompt_version in ['Version 1']: - config_data, dir_home = assemble_config(dir_home, run_name, dir_images_local,dir_output, - prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size, - path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images, - prompt_version, do_create_OCR_helper_image, use_domain_knowledge=True) - else: - config_data, dir_home = assemble_config(dir_home, run_name, dir_images_local,dir_output, - prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size, - path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images, - prompt_version, do_create_OCR_helper_image) - elif llm_version == 'palm': - if prompt_version in ['Version 1 PaLM 2']: - config_data, dir_home = assemble_config(dir_home, run_name, dir_images_local,dir_output, - prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size, - path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images, - prompt_version, do_create_OCR_helper_image, use_domain_knowledge=True) - else: - config_data, dir_home = assemble_config(dir_home, run_name, dir_images_local,dir_output, - prefix_removal,suffix_removal,catalog_numerical_only,LLM_version_user,batch_size, - path_domain_knowledge,embeddings_database_name,use_LeafMachine2_collage_images, - prompt_version, do_create_OCR_helper_image) - - - write_config_file(config_data, os.path.join(dir_home,'demo','demo_configs'),filename=filename) - - test_results[run_name] = False - ind_opt3 = -1 - ind_opt2 = -1 - ind_opt1 = -1 - - return dir_home, path_to_configs, test_results - -class TestOptionsGPT: - OPT1 = ["GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5"] - OPT2 = [False, True] - OPT3 = ["Version 1", "Version 1 No Domain Knowledge", "Version 2"] - - @classmethod - def get_options(cls): - return cls.OPT1, cls.OPT2, cls.OPT3 - @classmethod - def get_length(cls): - return 24 - -class TestOptionsPalm: - OPT1 = ["PaLM 2"] - OPT2 = [False, True] - OPT3 = ["Version 1 PaLM 2", "Version 1 PaLM 2 No Domain Knowledge", "Version 2 PaLM 2"] - - @classmethod - def get_options(cls): - return cls.OPT1, cls.OPT2, cls.OPT3 - @classmethod - def get_length(cls): - return 6 - -class TestOptionsAPI_openai: - OPT1 = ["GPT 3.5"] - OPT2 = [False] - OPT3 = ["Version 2"] - - @classmethod - def get_options(cls): - return cls.OPT1, cls.OPT2, cls.OPT3 - @classmethod - def get_length(cls): - return 24 - -class TestOptionsAPI_azure_openai: - OPT1 = ["Azure GPT 3.5"] - OPT2 = [False] - OPT3 = 
["Version 2"] - - @classmethod - def get_options(cls): - return cls.OPT1, cls.OPT2, cls.OPT3 - @classmethod - def get_length(cls): - return 24 - -class TestOptionsAPI_palm: - OPT1 = ["PaLM 2"] - OPT2 = [False] - OPT3 = ["Version 2 PaLM 2"] - - @classmethod - def get_options(cls): - return cls.OPT1, cls.OPT2, cls.OPT3 - @classmethod - def get_length(cls): - return 6 - -def run_demo_tests_GPT(progress_report): - dir_home, path_to_configs, test_results = build_demo_tests('gpt') - progress_report.set_n_overall(len(test_results.items())) - - JSON_results = {} - - for ind, (cfg, result) in enumerate(test_results.items()): - OPT1, OPT2, OPT3 = TestOptionsGPT.get_options() - - test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__') - opt1_readable = OPT1[int(ind_opt1.split('-')[1])] - - if opt1_readable in ["Azure GPT 4", "Azure GPT 3.5"]: - api_version = 'gpt-azure' - elif opt1_readable in ["GPT 4", "GPT 3.5"]: - api_version = 'gpt' - else: - raise - - opt2_readable = "Use LeafMachine2 for Collage Images" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2 for Collage Images" - opt3_readable = f"Prompt {OPT3[int(ind_opt3.split('-')[1])]}" - # Construct the human-readable test name - human_readable_name = f"{opt1_readable}, {opt2_readable}, {opt3_readable}" - get_n_overall = progress_report.get_n_overall() - progress_report.update_overall(f"Test {int(test_ind)+1} of {get_n_overall} --- Validating {human_readable_name}") - print_main_fail(f"Starting validation test: {human_readable_name}") - cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml'])) - - if check_API_key(dir_home, api_version) and check_API_key(dir_home, 'google-vision-ocr'): - try: - last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, cfg_test=None, progress_report=progress_report, test_ind=int(test_ind)) - test_results[cfg] = True - JSON_results[ind] = last_JSON_response - except Exception as e: - JSON_results[ind] = None - test_results[cfg] = False - print(f"An exception occurred: {e}") - traceback.print_exc() # This will print the full traceback - else: - fail_response = '' - if not check_API_key(dir_home, 'google-vision-ocr'): - fail_response += "No API key found for Google Vision OCR" - if not check_API_key(dir_home, api_version): - fail_response += f" + No API key found for {api_version}" - test_results[cfg] = False - JSON_results[ind] = fail_response - print(f"No API key found for {fail_response}") - - return test_results, JSON_results - -def run_demo_tests_Palm(progress_report): - api_version = 'palm' - - dir_home, path_to_configs, test_results = build_demo_tests('palm') - progress_report.set_n_overall(len(test_results.items())) - - JSON_results = {} - - for ind, (cfg, result) in enumerate(test_results.items()): - OPT1, OPT2, OPT3 = TestOptionsPalm.get_options() - test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__') - opt1_readable = OPT1[int(ind_opt1.split('-')[1])] - opt2_readable = "Use LeafMachine2 for Collage Images" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2 for Collage Images" - opt3_readable = f"Prompt {OPT3[int(ind_opt3.split('-')[1])]}" - # opt3_readable = "Use Domain Knowledge" if OPT3[int(ind_opt3.split('-')[1])] else "Don't use Domain Knowledge" - # Construct the human-readable test name - human_readable_name = f"{opt1_readable}, {opt2_readable}, {opt3_readable}" - get_n_overall = progress_report.get_n_overall() - progress_report.update_overall(f"Test {int(test_ind)+1} of {get_n_overall} --- Validating {human_readable_name}") - 
print_main_fail(f"Starting validation test: {human_readable_name}") - cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml'])) - - if check_API_key(dir_home, api_version) and check_API_key(dir_home, 'google-vision-ocr') : - try: - last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, cfg_test=None, progress_report=progress_report, test_ind=int(test_ind)) - test_results[cfg] = True - JSON_results[ind] = last_JSON_response - except Exception as e: - test_results[cfg] = False - JSON_results[ind] = None - print(f"An exception occurred: {e}") - traceback.print_exc() # This will print the full traceback - else: - fail_response = '' - if not check_API_key(dir_home, 'google-vision-ocr'): - fail_response += "No API key found for Google Vision OCR" - if not check_API_key(dir_home, api_version): - fail_response += f" + No API key found for {api_version}" - test_results[cfg] = False - JSON_results[ind] = fail_response - print(f"No API key found for {fail_response}") - - return test_results, JSON_results - -def run_api_tests(api): - try: - dir_home, path_to_configs, test_results = build_api_tests(api) - - JSON_results = {} - - for ind, (cfg, result) in enumerate(test_results.items()): - if api == 'openai': - OPT1, OPT2, OPT3 = TestOptionsAPI_openai.get_options() - elif 'azure_openai': - OPT1, OPT2, OPT3 = TestOptionsAPI_azure_openai.get_options() - elif 'palm': - OPT1, OPT2, OPT3 = TestOptionsAPI_palm.get_options() - test_ind, ind_opt1, ind_opt2, ind_opt3 = cfg.split('__') - opt1_readable = OPT1[int(ind_opt1.split('-')[1])] - opt2_readable = "Use LeafMachine2 for Collage Images" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2 for Collage Images" - opt3_readable = f"Prompt {OPT3[int(ind_opt3.split('-')[1])]}" - # opt3_readable = "Use Domain Knowledge" if OPT3[int(ind_opt3.split('-')[1])] else "Don't use Domain Knowledge" - # Construct the human-readable test name - human_readable_name = f"{opt1_readable}, {opt2_readable}, {opt3_readable}" - print_main_fail(f"Starting validation test: {human_readable_name}") - cfg_file_path = os.path.join(path_to_configs,'.'.join([cfg,'yaml'])) - - if check_API_key(dir_home, api) and check_API_key(dir_home, 'google-vision-ocr') : - try: - last_JSON_response, total_cost = voucher_vision(cfg_file_path, dir_home, None, cfg_test=None, progress_report=None, test_ind=int(test_ind)) - test_results[cfg] = True - JSON_results[ind] = last_JSON_response - return True - - except Exception as e: - print(e) - return False - else: - return False - except Exception as e: - print(e) - return False - -# def has_API_key(val): -# if val != '': -# return True -# else: -# return False -def has_API_key(key_name): - # Check if the environment variable by key_name is not None - return os.getenv(key_name) is not None - - -# def check_if_usable(): -# dir_home = os.path.dirname(os.path.dirname(__file__)) -# path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') -# cfg_private = get_cfg_from_full_path(path_cfg_private) - -# has_key_openai = has_API_key(cfg_private['openai']['OPENAI_API_KEY']) - -# has_key_azure_openai = has_API_key(cfg_private['openai_azure']['api_version']) - -# has_key_palm2 = has_API_key(cfg_private['google_palm']['google_palm_api']) - -# has_key_google_OCR = has_API_key(cfg_private['google_cloud']['path_json_file']) - -# if has_key_google_OCR and (has_key_azure_openai or has_key_openai or has_key_palm2): -# return True -# else: -# return False -def check_if_usable(): - has_key_openai = os.getenv('OPENAI_API_KEY') is not 
None - has_key_palm2 = os.getenv('PALM_API_KEY') is not None - has_key_google_OCR = os.getenv('GOOGLE_APPLICATION_CREDENTIALS') is not None - - return has_key_google_OCR and (has_key_openai or has_key_palm2) - -# def check_API_key(dir_home, api_version): -# dir_home = os.path.dirname(os.path.dirname(__file__)) -# path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') -# cfg_private = get_cfg_from_full_path(path_cfg_private) - -# has_key_openai = has_API_key(cfg_private['openai']['OPENAI_API_KEY']) - -# has_key_azure_openai = has_API_key(cfg_private['openai_azure']['api_version']) - -# has_key_palm2 = has_API_key(cfg_private['google_palm']['google_palm_api']) - -# has_key_google_OCR = has_API_key(cfg_private['google_cloud']['path_json_file']) - -# if api_version == 'palm' and has_key_palm2: -# return True -# elif api_version in ['gpt','openai'] and has_key_openai: -# return True -# elif api_version in ['gpt-azure', 'azure_openai'] and has_key_azure_openai: -# return True -# elif api_version == 'google-vision-ocr' and has_key_google_OCR: -# return True -# else: -# return False -def check_API_key(api_version): - # The API keys are assumed to be set in the environment variables - has_key_openai = os.getenv('OPENAI_API_KEY') is not None - has_key_palm2 = os.getenv('PALM') is not None - has_key_google_OCR = os.getenv('GOOGLE_APPLICATION_CREDENTIALS') is not None - - # Depending on the api_version, check if the corresponding key is present - if api_version == 'palm' and has_key_palm2: - return True - elif api_version in ['gpt', 'openai'] and has_key_openai: - return True - elif api_version == 'google-vision-ocr' and has_key_google_OCR: - return True - else: - return False - diff --git a/spaces/pierreguillou/whisper-demo-portuguese/app.py b/spaces/pierreguillou/whisper-demo-portuguese/app.py deleted file mode 100644 index ddd341066e66c5207b547bac76cd9e43a4c9fb0a..0000000000000000000000000000000000000000 --- a/spaces/pierreguillou/whisper-demo-portuguese/app.py +++ /dev/null @@ -1,98 +0,0 @@ -import torch - -import gradio as gr -import pytube as pt -from transformers import pipeline -from huggingface_hub import model_info - -MODEL_NAME = "pierreguillou/whisper-medium-portuguese" #this always needs to stay in line 8 :D sorry for the hackiness -lang = "pt" - -device = 0 if torch.cuda.is_available() else "cpu" - -pipe = pipeline( - task="automatic-speech-recognition", - model=MODEL_NAME, - chunk_length_s=30, - device=device, -) - -pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe") - -def transcribe(microphone, file_upload): - warn_output = "" - if (microphone is not None) and (file_upload is not None): - warn_output = ( - "WARNING: You've uploaded an audio file and used the microphone. " - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - ) - - elif (microphone is None) and (file_upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - file = microphone if microphone is not None else file_upload - - text = pipe(file)["text"] - - return warn_output + text - - -def _return_yt_html_embed(yt_url): - video_id = yt_url.split("?v=")[-1] - HTML_str = ( - f'
    ' - "
    " - ) - return HTML_str - - -def yt_transcribe(yt_url): - yt = pt.YouTube(yt_url) - html_embed_str = _return_yt_html_embed(yt_url) - stream = yt.streams.filter(only_audio=True)[0] - stream.download(filename="audio.mp3") - - text = pipe("audio.mp3")["text"] - - return html_embed_str, text - - -demo = gr.Blocks() - -mf_transcribe = gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type="filepath", optional=True), - gr.inputs.Audio(source="upload", type="filepath", optional=True), - ], - outputs="text", - layout="horizontal", - theme="huggingface", - title="Whisper Demo: Transcribe Portuguese Audio", - description=( - "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the the fine-tuned" - f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files" - " of arbitrary length (all information about this Web APP in this blog post: [Speech-to-Text & IA | Transcreva qualquer áudio para o português com o Whisper (OpenAI)... sem nenhum custo!](https://medium.com/@pierre_guillou/speech-to-text-ia-transcreva-qualquer-%C3%A1udio-para-o-portugu%C3%AAs-com-o-whisper-openai-sem-ad0c17384681))." - ), - allow_flagging="never", -) - -yt_transcribe = gr.Interface( - fn=yt_transcribe, - inputs=[gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")], - outputs=["html", "text"], - layout="horizontal", - theme="huggingface", - title="Whisper Demo: Transcribe Portuguese YouTube", - description=( - "Transcribe long-form YouTube videos with the click of a button! Demo uses the the fine-tuned checkpoint:" - f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files of" - " arbitrary length (all information about this Web APP in this blog post: [Speech-to-Text & IA | Transcreva qualquer áudio para o português com o Whisper (OpenAI)... sem nenhum custo!](https://medium.com/@pierre_guillou/speech-to-text-ia-transcreva-qualquer-%C3%A1udio-para-o-portugu%C3%AAs-com-o-whisper-openai-sem-ad0c17384681))." - ), - allow_flagging="never", -) - -with demo: - gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"]) - -demo.launch(enable_queue=True) diff --git a/spaces/pinkq/Newbing/src/components/ui/voice/index.tsx b/spaces/pinkq/Newbing/src/components/ui/voice/index.tsx deleted file mode 100644 index 4adcb632226bfced8b97092782811edf08b56569..0000000000000000000000000000000000000000 --- a/spaces/pinkq/Newbing/src/components/ui/voice/index.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import './index.scss' - -export interface VoiceProps extends CSSPropertyRule { - num?: number; - duration?: number; -} -export default function Voice({ duration = 400, num = 7, ...others }) { - return ( -
    - {Array.from({ length: num }).map((_, index) => { - const randomDuration = Math.random() * 100 + duration - const initialDelay = Math.random() * 2 * duration - const initialScale = Math.sin((index + 1) * Math.PI / num) - return ( -
    - ) - })} -
    - ) -} diff --git a/spaces/pinkq/Newbing/src/pages/api/image.ts b/spaces/pinkq/Newbing/src/pages/api/image.ts deleted file mode 100644 index 4b894bea86050c0f3888cc56f60c0cb7f8b57cfc..0000000000000000000000000000000000000000 --- a/spaces/pinkq/Newbing/src/pages/api/image.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' -import { createImage } from '@/lib/bots/bing/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const { prompt, id } = req.query - if (!prompt) { - return res.json({ - result: { - value: 'Image', - message: 'No Prompt' - } - }) - } - try { - const headers = createHeaders(req.cookies, { - IMAGE_BING_COOKIE: process.env.IMAGE_BING_COOKIE - }) - - debug('headers', headers) - const response = await createImage(String(prompt), String(id), { - ...headers, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - }) - res.writeHead(200, { - 'Content-Type': 'text/plain; charset=UTF-8', - }) - return res.end(response) - } catch (e) { - return res.json({ - result: { - value: 'Error', - message: `${e}` - } - }) - } -} diff --git a/spaces/prajwalkhairnar/facial_emotion_detection_multiclass/README.md b/spaces/prajwalkhairnar/facial_emotion_detection_multiclass/README.md deleted file mode 100644 index bf3931a01745f473669855e8ebd0a2969de4fbb6..0000000000000000000000000000000000000000 --- a/spaces/prajwalkhairnar/facial_emotion_detection_multiclass/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Facial Emotion Detection Multiclass -emoji: 👀 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/roundTools.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/roundTools.py deleted file mode 100644 index 48a47c07c8575895f894a24065046bc308a69b97..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/roundTools.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Various round-to-integer helpers. -""" - -import math -import functools -import logging - -log = logging.getLogger(__name__) - -__all__ = [ - "noRound", - "otRound", - "maybeRound", - "roundFunc", -] - - -def noRound(value): - return value - - -def otRound(value): - """Round float value to nearest integer towards ``+Infinity``. - - The OpenType spec (in the section on `"normalization" of OpenType Font Variations `_) - defines the required method for converting floating point values to - fixed-point. In particular it specifies the following rounding strategy: - - for fractional values of 0.5 and higher, take the next higher integer; - for other fractional values, truncate. - - This function rounds the floating-point value according to this strategy - in preparation for conversion to fixed-point. - - Args: - value (float): The input floating-point value. - - Returns - float: The rounded value. 
- """ - # See this thread for how we ended up with this implementation: - # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166 - return int(math.floor(value + 0.5)) - - -def maybeRound(v, tolerance, round=otRound): - rounded = round(v) - return rounded if abs(rounded - v) <= tolerance else v - - -def roundFunc(tolerance, round=otRound): - if tolerance < 0: - raise ValueError("Rounding tolerance must be positive") - - if tolerance == 0: - return noRound - - if tolerance >= 0.5: - return round - - return functools.partial(maybeRound, tolerance=tolerance, round=round) - - -def nearestMultipleShortestRepr(value: float, factor: float) -> str: - """Round to nearest multiple of factor and return shortest decimal representation. - - This chooses the float that is closer to a multiple of the given factor while - having the shortest decimal representation (the least number of fractional decimal - digits). - - For example, given the following: - - >>> nearestMultipleShortestRepr(-0.61883544921875, 1.0/(1<<14)) - '-0.61884' - - Useful when you need to serialize or print a fixed-point number (or multiples - thereof, such as F2Dot14 fractions of 180 degrees in COLRv1 PaintRotate) in - a human-readable form. - - Args: - value (value): The value to be rounded and serialized. - factor (float): The value which the result is a close multiple of. - - Returns: - str: A compact string representation of the value. - """ - if not value: - return "0.0" - - value = otRound(value / factor) * factor - eps = 0.5 * factor - lo = value - eps - hi = value + eps - # If the range of valid choices spans an integer, return the integer. - if int(lo) != int(hi): - return str(float(round(value))) - - fmt = "%.8f" - lo = fmt % lo - hi = fmt % hi - assert len(lo) == len(hi) and lo != hi - for i in range(len(lo)): - if lo[i] != hi[i]: - break - period = lo.find(".") - assert period < i - fmt = "%%.%df" % (i - period) - return fmt % value diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/wasm/src/webworker/http.ts b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/wasm/src/webworker/http.ts deleted file mode 100644 index 2cd19bb3745c15d58e28961a96067c2fbc5c3cfb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/wasm/src/webworker/http.ts +++ /dev/null @@ -1,133 +0,0 @@ -import type { PyProxy } from "pyodide/ffi"; -import type { HttpRequest, HttpResponse } from "../message-types"; - -// Inspired by https://github.com/rstudio/shinylive/blob/v0.1.2/src/messageporthttp.ts - -// A reference to an ASGI application instance in Python -// Ref: https://asgi.readthedocs.io/en/latest/specs/main.html#applications -type ASGIApplication = ( - scope: Record, - receive: () => Promise, - send: (event: PyProxy) => Promise -) => Promise; - -type ReceiveEvent = RequestReceiveEvent | DisconnectReceiveEvent; -// https://asgi.readthedocs.io/en/latest/specs/www.html#request-receive-event -interface RequestReceiveEvent { - type: "http.request"; - body?: Uint8Array; // `bytes` in Python - more_body: boolean; -} -// https://asgi.readthedocs.io/en/latest/specs/www.html#disconnect-receive-event -interface DisconnectReceiveEvent { - type: "http.disconnect"; -} - -type SendEvent = ResponseStartSendEvent | ResponseBodySendEvent; -// https://asgi.readthedocs.io/en/latest/specs/www.html#response-start-send-event -interface ResponseStartSendEvent { - type: "http.response.start"; - 
status: number; - headers: Iterable<[Uint8Array, Uint8Array]>; - trailers: boolean; -} -// https://asgi.readthedocs.io/en/latest/specs/www.html#response-body-send-event -interface ResponseBodySendEvent { - type: "http.response.body"; - body: Uint8Array; // `bytes` in Python - more_body: boolean; -} - -function headersToASGI(headers: HttpRequest["headers"]): [string, string][] { - const result: [string, string][] = []; - for (const [key, value] of Object.entries(headers)) { - result.push([key, value]); - } - return result; -} - -export function uint8ArrayToString(buf: Uint8Array): string { - let result = ""; - for (let i = 0; i < buf.length; i++) { - result += String.fromCharCode(buf[i]); - } - return result; -} - -function asgiHeadersToRecord(headers: any): Record { - headers = headers.map(([key, val]: [Uint8Array, Uint8Array]) => { - return [uint8ArrayToString(key), uint8ArrayToString(val)]; - }); - return Object.fromEntries(headers); -} - -export const makeHttpRequest = ( - asgiApp: ASGIApplication, - request: HttpRequest -): Promise => - new Promise((resolve, reject) => { - let sent = false; - async function receiveFromJs(): Promise { - if (sent) { - // NOTE: I implemented this block just referring to the spec. However, it is not reached in practice so it's not combat-proven. - return { - type: "http.disconnect" - }; - } - - const event: RequestReceiveEvent = { - type: "http.request", - more_body: false - }; - if (request.body) { - event.body = request.body; - } - - console.debug("receive", event); - sent = true; - return event; - } - - let status: number; - let headers: { [key: string]: string }; - let body: Uint8Array = new Uint8Array(); - async function sendToJs(proxiedEvent: PyProxy): Promise { - const event = Object.fromEntries(proxiedEvent.toJs()) as SendEvent; - console.debug("send", event); - if (event.type === "http.response.start") { - status = event.status; - headers = asgiHeadersToRecord(event.headers); - } else if (event.type === "http.response.body") { - body = new Uint8Array([...body, ...event.body]); - if (!event.more_body) { - const response: HttpResponse = { - status, - headers, - body - }; - console.debug("HTTP response", response); - resolve(response); - } - } else { - throw new Error(`Unhandled ASGI event: ${JSON.stringify(event)}`); - } - } - - // https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope - const scope = { - type: "http", - asgi: { - version: "3.0", - spec_version: "2.1" - }, - http_version: "1.1", - scheme: "http", - method: request.method, - path: request.path, - query_string: request.query_string, - root_path: "", - headers: headersToASGI(request.headers) - }; - - asgiApp(scope, receiveFromJs, sendToJs).catch(reject); - }); diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_backends/mock.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_backends/mock.py deleted file mode 100644 index f7aefebf519487bba08cba6af043b00ee453ef81..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_backends/mock.py +++ /dev/null @@ -1,142 +0,0 @@ -import ssl -import typing -from typing import Optional - -from .._exceptions import ReadError -from .base import ( - SOCKET_OPTION, - AsyncNetworkBackend, - AsyncNetworkStream, - NetworkBackend, - NetworkStream, -) - - -class MockSSLObject: - def __init__(self, http2: bool): - self._http2 = http2 - - def selected_alpn_protocol(self) -> str: - return "h2" if self._http2 else 
"http/1.1" - - -class MockStream(NetworkStream): - def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None: - self._buffer = buffer - self._http2 = http2 - self._closed = False - - def read(self, max_bytes: int, timeout: Optional[float] = None) -> bytes: - if self._closed: - raise ReadError("Connection closed") - if not self._buffer: - return b"" - return self._buffer.pop(0) - - def write(self, buffer: bytes, timeout: Optional[float] = None) -> None: - pass - - def close(self) -> None: - self._closed = True - - def start_tls( - self, - ssl_context: ssl.SSLContext, - server_hostname: Optional[str] = None, - timeout: Optional[float] = None, - ) -> NetworkStream: - return self - - def get_extra_info(self, info: str) -> typing.Any: - return MockSSLObject(http2=self._http2) if info == "ssl_object" else None - - def __repr__(self) -> str: - return "" - - -class MockBackend(NetworkBackend): - def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None: - self._buffer = buffer - self._http2 = http2 - - def connect_tcp( - self, - host: str, - port: int, - timeout: Optional[float] = None, - local_address: Optional[str] = None, - socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, - ) -> NetworkStream: - return MockStream(list(self._buffer), http2=self._http2) - - def connect_unix_socket( - self, - path: str, - timeout: Optional[float] = None, - socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, - ) -> NetworkStream: - return MockStream(list(self._buffer), http2=self._http2) - - def sleep(self, seconds: float) -> None: - pass - - -class AsyncMockStream(AsyncNetworkStream): - def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None: - self._buffer = buffer - self._http2 = http2 - self._closed = False - - async def read(self, max_bytes: int, timeout: Optional[float] = None) -> bytes: - if self._closed: - raise ReadError("Connection closed") - if not self._buffer: - return b"" - return self._buffer.pop(0) - - async def write(self, buffer: bytes, timeout: Optional[float] = None) -> None: - pass - - async def aclose(self) -> None: - self._closed = True - - async def start_tls( - self, - ssl_context: ssl.SSLContext, - server_hostname: Optional[str] = None, - timeout: Optional[float] = None, - ) -> AsyncNetworkStream: - return self - - def get_extra_info(self, info: str) -> typing.Any: - return MockSSLObject(http2=self._http2) if info == "ssl_object" else None - - def __repr__(self) -> str: - return "" - - -class AsyncMockBackend(AsyncNetworkBackend): - def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None: - self._buffer = buffer - self._http2 = http2 - - async def connect_tcp( - self, - host: str, - port: int, - timeout: Optional[float] = None, - local_address: Optional[str] = None, - socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, - ) -> AsyncNetworkStream: - return AsyncMockStream(list(self._buffer), http2=self._http2) - - async def connect_unix_socket( - self, - path: str, - timeout: Optional[float] = None, - socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, - ) -> AsyncNetworkStream: - return AsyncMockStream(list(self._buffer), http2=self._http2) - - async def sleep(self, seconds: float) -> None: - pass diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/arrays/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/arrays/__init__.py deleted file mode 100644 index 
32e2afc0eef52578e568f08f48e44d2a7c3103f3..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/arrays/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -All of pandas' ExtensionArrays. - -See :ref:`extending.extension-types` for more. -""" -from pandas.core.arrays import ( - ArrowExtensionArray, - ArrowStringArray, - BooleanArray, - Categorical, - DatetimeArray, - FloatingArray, - IntegerArray, - IntervalArray, - NumpyExtensionArray, - PeriodArray, - SparseArray, - StringArray, - TimedeltaArray, -) - -__all__ = [ - "ArrowExtensionArray", - "ArrowStringArray", - "BooleanArray", - "Categorical", - "DatetimeArray", - "FloatingArray", - "IntegerArray", - "IntervalArray", - "NumpyExtensionArray", - "PeriodArray", - "SparseArray", - "StringArray", - "TimedeltaArray", -] - - -def __getattr__(name: str): - if name == "PandasArray": - # GH#53694 - import warnings - - from pandas.util._exceptions import find_stack_level - - warnings.warn( - "PandasArray has been renamed NumpyExtensionArray. Use that " - "instead. This alias will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return NumpyExtensionArray - raise AttributeError(f"module 'pandas.arrays' has no attribute '{name}'") diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/sparse/api.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/sparse/api.py deleted file mode 100644 index 6650a5c4e90a0f73a43e6e35cdd26c1189daf256..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/sparse/api.py +++ /dev/null @@ -1,5 +0,0 @@ -from pandas.core.dtypes.dtypes import SparseDtype - -from pandas.core.arrays.sparse import SparseArray - -__all__ = ["SparseArray", "SparseDtype"] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/build_env.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/build_env.py deleted file mode 100644 index daeb7fbc8d7c32b0d0e7c2798dc1388c4e97f74d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/build_env.py +++ /dev/null @@ -1,296 +0,0 @@ -"""Build Environment used for isolation during sdist building -""" - -import contextlib -import logging -import os -import pathlib -import sys -import textwrap -import zipfile -from collections import OrderedDict -from sysconfig import get_paths -from types import TracebackType -from typing import TYPE_CHECKING, Iterable, Iterator, List, Optional, Set, Tuple, Type - -from pip._vendor.certifi import where -from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.version import Version - -from pip import __file__ as pip_location -from pip._internal.cli.spinners import open_spinner -from pip._internal.locations import get_platlib, get_prefixed_libs, get_purelib -from pip._internal.metadata import get_environment -from pip._internal.utils.subprocess import call_subprocess -from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds - -if TYPE_CHECKING: - from pip._internal.index.package_finder import PackageFinder - -logger = logging.getLogger(__name__) - - -class _Prefix: - def __init__(self, path: str) -> None: - self.path = path - self.setup = False - self.bin_dir = get_paths( - "nt" if os.name == "nt" else "posix_prefix", - vars={"base": path, "platbase": path}, - )["scripts"] - 
self.lib_dirs = get_prefixed_libs(path) - - -@contextlib.contextmanager -def _create_standalone_pip() -> Iterator[str]: - """Create a "standalone pip" zip file. - - The zip file's content is identical to the currently-running pip. - It will be used to install requirements into the build environment. - """ - source = pathlib.Path(pip_location).resolve().parent - - # Return the current instance if `source` is not a directory. We can't build - # a zip from this, and it likely means the instance is already standalone. - if not source.is_dir(): - yield str(source) - return - - with TempDirectory(kind="standalone-pip") as tmp_dir: - pip_zip = os.path.join(tmp_dir.path, "__env_pip__.zip") - kwargs = {} - if sys.version_info >= (3, 8): - kwargs["strict_timestamps"] = False - with zipfile.ZipFile(pip_zip, "w", **kwargs) as zf: - for child in source.rglob("*"): - zf.write(child, child.relative_to(source.parent).as_posix()) - yield os.path.join(pip_zip, "pip") - - -class BuildEnvironment: - """Creates and manages an isolated environment to install build deps""" - - def __init__(self) -> None: - temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True) - - self._prefixes = OrderedDict( - (name, _Prefix(os.path.join(temp_dir.path, name))) - for name in ("normal", "overlay") - ) - - self._bin_dirs: List[str] = [] - self._lib_dirs: List[str] = [] - for prefix in reversed(list(self._prefixes.values())): - self._bin_dirs.append(prefix.bin_dir) - self._lib_dirs.extend(prefix.lib_dirs) - - # Customize site to: - # - ensure .pth files are honored - # - prevent access to system site packages - system_sites = { - os.path.normcase(site) for site in (get_purelib(), get_platlib()) - } - self._site_dir = os.path.join(temp_dir.path, "site") - if not os.path.exists(self._site_dir): - os.mkdir(self._site_dir) - with open( - os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8" - ) as fp: - fp.write( - textwrap.dedent( - """ - import os, site, sys - - # First, drop system-sites related paths. - original_sys_path = sys.path[:] - known_paths = set() - for path in {system_sites!r}: - site.addsitedir(path, known_paths=known_paths) - system_paths = set( - os.path.normcase(path) - for path in sys.path[len(original_sys_path):] - ) - original_sys_path = [ - path for path in original_sys_path - if os.path.normcase(path) not in system_paths - ] - sys.path = original_sys_path - - # Second, add lib directories. - # ensuring .pth file are processed. 
- for path in {lib_dirs!r}: - assert not path in sys.path - site.addsitedir(path) - """ - ).format(system_sites=system_sites, lib_dirs=self._lib_dirs) - ) - - def __enter__(self) -> None: - self._save_env = { - name: os.environ.get(name, None) - for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH") - } - - path = self._bin_dirs[:] - old_path = self._save_env["PATH"] - if old_path: - path.extend(old_path.split(os.pathsep)) - - pythonpath = [self._site_dir] - - os.environ.update( - { - "PATH": os.pathsep.join(path), - "PYTHONNOUSERSITE": "1", - "PYTHONPATH": os.pathsep.join(pythonpath), - } - ) - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - for varname, old_value in self._save_env.items(): - if old_value is None: - os.environ.pop(varname, None) - else: - os.environ[varname] = old_value - - def check_requirements( - self, reqs: Iterable[str] - ) -> Tuple[Set[Tuple[str, str]], Set[str]]: - """Return 2 sets: - - conflicting requirements: set of (installed, wanted) reqs tuples - - missing requirements: set of reqs - """ - missing = set() - conflicting = set() - if reqs: - env = get_environment(self._lib_dirs) - for req_str in reqs: - req = Requirement(req_str) - dist = env.get_distribution(req.name) - if not dist: - missing.add(req_str) - continue - if isinstance(dist.version, Version): - installed_req_str = f"{req.name}=={dist.version}" - else: - installed_req_str = f"{req.name}==={dist.version}" - if dist.version not in req.specifier: - conflicting.add((installed_req_str, req_str)) - # FIXME: Consider direct URL? - return conflicting, missing - - def install_requirements( - self, - finder: "PackageFinder", - requirements: Iterable[str], - prefix_as_string: str, - *, - kind: str, - ) -> None: - prefix = self._prefixes[prefix_as_string] - assert not prefix.setup - prefix.setup = True - if not requirements: - return - with contextlib.ExitStack() as ctx: - pip_runnable = ctx.enter_context(_create_standalone_pip()) - self._install_requirements( - pip_runnable, - finder, - requirements, - prefix, - kind=kind, - ) - - @staticmethod - def _install_requirements( - pip_runnable: str, - finder: "PackageFinder", - requirements: Iterable[str], - prefix: _Prefix, - *, - kind: str, - ) -> None: - args: List[str] = [ - sys.executable, - pip_runnable, - "install", - "--ignore-installed", - "--no-user", - "--prefix", - prefix.path, - "--no-warn-script-location", - ] - if logger.getEffectiveLevel() <= logging.DEBUG: - args.append("-v") - for format_control in ("no_binary", "only_binary"): - formats = getattr(finder.format_control, format_control) - args.extend( - ( - "--" + format_control.replace("_", "-"), - ",".join(sorted(formats or {":none:"})), - ) - ) - - index_urls = finder.index_urls - if index_urls: - args.extend(["-i", index_urls[0]]) - for extra_index in index_urls[1:]: - args.extend(["--extra-index-url", extra_index]) - else: - args.append("--no-index") - for link in finder.find_links: - args.extend(["--find-links", link]) - - for host in finder.trusted_hosts: - args.extend(["--trusted-host", host]) - if finder.allow_all_prereleases: - args.append("--pre") - if finder.prefer_binary: - args.append("--prefer-binary") - args.append("--") - args.extend(requirements) - extra_environ = {"_PIP_STANDALONE_CERT": where()} - with open_spinner(f"Installing {kind}") as spinner: - call_subprocess( - args, - command_desc=f"pip subprocess to install {kind}", - spinner=spinner, - extra_environ=extra_environ, 
- ) - - -class NoOpBuildEnvironment(BuildEnvironment): - """A no-op drop-in replacement for BuildEnvironment""" - - def __init__(self) -> None: - pass - - def __enter__(self) -> None: - pass - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - pass - - def cleanup(self) -> None: - pass - - def install_requirements( - self, - finder: "PackageFinder", - requirements: Iterable[str], - prefix_as_string: str, - *, - kind: str, - ) -> None: - raise NotImplementedError() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/response.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/response.py deleted file mode 100644 index 5ea609ccedf18eb4ab70f8fc6990448eb6407237..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/response.py +++ /dev/null @@ -1,107 +0,0 @@ -from __future__ import absolute_import - -from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect - -from ..exceptions import HeaderParsingError -from ..packages.six.moves import http_client as httplib - - -def is_fp_closed(obj): - """ - Checks whether a given file-like object is closed. - - :param obj: - The file-like object to check. - """ - - try: - # Check `isclosed()` first, in case Python3 doesn't set `closed`. - # GH Issue #928 - return obj.isclosed() - except AttributeError: - pass - - try: - # Check via the official file-like-object way. - return obj.closed - except AttributeError: - pass - - try: - # Check if the object is a container for another file-like object that - # gets released on exhaustion (e.g. HTTPResponse). - return obj.fp is None - except AttributeError: - pass - - raise ValueError("Unable to determine whether fp is closed.") - - -def assert_header_parsing(headers): - """ - Asserts whether all headers have been successfully parsed. - Extracts encountered errors from the result of parsing headers. - - Only works on Python 3. - - :param http.client.HTTPMessage headers: Headers to verify. - - :raises urllib3.exceptions.HeaderParsingError: - If parsing errors are found. - """ - - # This will fail silently if we pass in the wrong kind of parameter. - # To make debugging easier add an explicit check. - if not isinstance(headers, httplib.HTTPMessage): - raise TypeError("expected httplib.Message, got {0}.".format(type(headers))) - - defects = getattr(headers, "defects", None) - get_payload = getattr(headers, "get_payload", None) - - unparsed_data = None - if get_payload: - # get_payload is actually email.message.Message.get_payload; - # we're only interested in the result if it's not a multipart message - if not headers.is_multipart(): - payload = get_payload() - - if isinstance(payload, (bytes, str)): - unparsed_data = payload - if defects: - # httplib is assuming a response body is available - # when parsing headers even when httplib only sends - # header data to parse_headers() This results in - # defects on multipart responses in particular. - # See: https://github.com/urllib3/urllib3/issues/800 - - # So we ignore the following defects: - # - StartBoundaryNotFoundDefect: - # The claimed start boundary was never found. - # - MultipartInvariantViolationDefect: - # A message claimed to be a multipart but no subparts were found. 
- defects = [ - defect - for defect in defects - if not isinstance( - defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect) - ) - ] - - if defects or unparsed_data: - raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) - - -def is_response_to_head(response): - """ - Checks whether the request of a response has been a HEAD-request. - Handles the quirks of AppEngine. - - :param http.client.HTTPResponse response: - Response to check if the originating request - used 'HEAD' as a method. - """ - # FIXME: Can we do this somehow without accessing private httplib _method? - method = response._method - if isinstance(method, int): # Platform-specific: Appengine - return method == 3 - return method.upper() == "HEAD" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/nit.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/nit.py deleted file mode 100644 index b4e85f304d969d83d51730c4d145b1677c57a251..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/nit.py +++ /dev/null @@ -1,64 +0,0 @@ -""" - pygments.lexers.nit - ~~~~~~~~~~~~~~~~~~~ - - Lexer for the Nit language. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation - -__all__ = ['NitLexer'] - - -class NitLexer(RegexLexer): - """ - For nit source. - - .. versionadded:: 2.0 - """ - - name = 'Nit' - url = 'http://nitlanguage.org' - aliases = ['nit'] - filenames = ['*.nit'] - tokens = { - 'root': [ - (r'#.*?$', Comment.Single), - (words(( - 'package', 'module', 'import', 'class', 'abstract', 'interface', - 'universal', 'enum', 'end', 'fun', 'type', 'init', 'redef', - 'isa', 'do', 'readable', 'writable', 'var', 'intern', 'extern', - 'public', 'protected', 'private', 'intrude', 'if', 'then', - 'else', 'while', 'loop', 'for', 'in', 'and', 'or', 'not', - 'implies', 'return', 'continue', 'break', 'abort', 'assert', - 'new', 'is', 'once', 'super', 'self', 'true', 'false', 'nullable', - 'null', 'as', 'isset', 'label', '__debug__'), suffix=r'(?=[\r\n\t( ])'), - Keyword), - (r'[A-Z]\w*', Name.Class), - (r'"""(([^\'\\]|\\.)|\\r|\\n)*((\{\{?)?(""?\{\{?)*""""*)', String), # Simple long string - (r'\'\'\'(((\\.|[^\'\\])|\\r|\\n)|\'((\\.|[^\'\\])|\\r|\\n)|' - r'\'\'((\\.|[^\'\\])|\\r|\\n))*\'\'\'', String), # Simple long string alt - (r'"""(([^\'\\]|\\.)|\\r|\\n)*((""?)?(\{\{?""?)*\{\{\{\{*)', String), # Start long string - (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(""?)?(\{\{?""?)*\{\{\{\{*', String), # Mid long string - (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(\{\{?)?(""?\{\{?)*""""*', String), # End long string - (r'"(\\.|([^"}{\\]))*"', String), # Simple String - (r'"(\\.|([^"}{\\]))*\{', String), # Start string - (r'\}(\\.|([^"}{\\]))*\{', String), # Mid String - (r'\}(\\.|([^"}{\\]))*"', String), # End String - (r'(\'[^\'\\]\')|(\'\\.\')', String.Char), - (r'[0-9]+', Number.Integer), - (r'[0-9]*.[0-9]+', Number.Float), - (r'0(x|X)[0-9A-Fa-f]+', Number.Hex), - (r'[a-z]\w*', Name), - (r'_\w+', Name.Variable.Instance), - (r'==|!=|<==>|>=|>>|>|<=|<<|<|\+|-|=|/|\*|%|\+=|-=|!|@', Operator), - (r'\(|\)|\[|\]|,|\.\.\.|\.\.|\.|::|:', Punctuation), - (r'`\{[^`]*`\}', Text), # Extern blocks won't be Lexed by Nit - (r'[\r\n\t ]+', Text), - ], - } diff --git 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/install_egg_info.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/install_egg_info.py deleted file mode 100644 index 0ddc7367cc608dac2cfb408a08c8b442278a8207..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/install_egg_info.py +++ /dev/null @@ -1,77 +0,0 @@ -"""distutils.command.install_egg_info - -Implements the Distutils 'install_egg_info' command, for installing -a package's PKG-INFO metadata.""" - - -from distutils.cmd import Command -from distutils import log, dir_util -import os, sys, re - -class install_egg_info(Command): - """Install an .egg-info file for the package""" - - description = "Install package's PKG-INFO metadata as an .egg-info file" - user_options = [ - ('install-dir=', 'd', "directory to install to"), - ] - - def initialize_options(self): - self.install_dir = None - - def finalize_options(self): - self.set_undefined_options('install_lib',('install_dir','install_dir')) - basename = "%s-%s-py%d.%d.egg-info" % ( - to_filename(safe_name(self.distribution.get_name())), - to_filename(safe_version(self.distribution.get_version())), - *sys.version_info[:2] - ) - self.target = os.path.join(self.install_dir, basename) - self.outputs = [self.target] - - def run(self): - target = self.target - if os.path.isdir(target) and not os.path.islink(target): - dir_util.remove_tree(target, dry_run=self.dry_run) - elif os.path.exists(target): - self.execute(os.unlink,(self.target,),"Removing "+target) - elif not os.path.isdir(self.install_dir): - self.execute(os.makedirs, (self.install_dir,), - "Creating "+self.install_dir) - log.info("Writing %s", target) - if not self.dry_run: - with open(target, 'w', encoding='UTF-8') as f: - self.distribution.metadata.write_pkg_file(f) - - def get_outputs(self): - return self.outputs - - -# The following routines are taken from setuptools' pkg_resources module and -# can be replaced by importing them from pkg_resources once it is included -# in the stdlib. - -def safe_name(name): - """Convert an arbitrary string to a standard distribution name - - Any runs of non-alphanumeric/. characters are replaced with a single '-'. - """ - return re.sub('[^A-Za-z0-9.]+', '-', name) - - -def safe_version(version): - """Convert an arbitrary string to a standard version string - - Spaces become dots, and all other non-alphanumeric characters become - dashes, with runs of multiple dashes condensed to a single dash. - """ - version = version.replace(' ','.') - return re.sub('[^A-Za-z0-9.]+', '-', version) - - -def to_filename(name): - """Convert a project or version name to its filename-escaped form - - Any '-' characters are currently replaced with '_'. 
- """ - return name.replace('-','_') diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/urllib3/_base_connection.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/urllib3/_base_connection.py deleted file mode 100644 index 25b633af257f20c87424ec8a341cbd6f5797b804..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/urllib3/_base_connection.py +++ /dev/null @@ -1,173 +0,0 @@ -from __future__ import annotations - -import typing - -from .util.connection import _TYPE_SOCKET_OPTIONS -from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT -from .util.url import Url - -_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str] - - -class ProxyConfig(typing.NamedTuple): - ssl_context: ssl.SSLContext | None - use_forwarding_for_https: bool - assert_hostname: None | str | Literal[False] - assert_fingerprint: str | None - - -class _ResponseOptions(typing.NamedTuple): - # TODO: Remove this in favor of a better - # HTTP request/response lifecycle tracking. - request_method: str - request_url: str - preload_content: bool - decode_content: bool - enforce_content_length: bool - - -if typing.TYPE_CHECKING: - import ssl - - from typing_extensions import Literal, Protocol - - from .response import BaseHTTPResponse - - class BaseHTTPConnection(Protocol): - default_port: typing.ClassVar[int] - default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS] - - host: str - port: int - timeout: None | ( - float - ) # Instance doesn't store _DEFAULT_TIMEOUT, must be resolved. - blocksize: int - source_address: tuple[str, int] | None - socket_options: _TYPE_SOCKET_OPTIONS | None - - proxy: Url | None - proxy_config: ProxyConfig | None - - is_verified: bool - proxy_is_verified: bool | None - - def __init__( - self, - host: str, - port: int | None = None, - *, - timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, - source_address: tuple[str, int] | None = None, - blocksize: int = 8192, - socket_options: _TYPE_SOCKET_OPTIONS | None = ..., - proxy: Url | None = None, - proxy_config: ProxyConfig | None = None, - ) -> None: - ... - - def set_tunnel( - self, - host: str, - port: int | None = None, - headers: typing.Mapping[str, str] | None = None, - scheme: str = "http", - ) -> None: - ... - - def connect(self) -> None: - ... - - def request( - self, - method: str, - url: str, - body: _TYPE_BODY | None = None, - headers: typing.Mapping[str, str] | None = None, - # We know *at least* botocore is depending on the order of the - # first 3 parameters so to be safe we only mark the later ones - # as keyword-only to ensure we have space to extend. - *, - chunked: bool = False, - preload_content: bool = True, - decode_content: bool = True, - enforce_content_length: bool = True, - ) -> None: - ... - - def getresponse(self) -> BaseHTTPResponse: - ... - - def close(self) -> None: - ... - - @property - def is_closed(self) -> bool: - """Whether the connection either is brand new or has been previously closed. - If this property is True then both ``is_connected`` and ``has_connected_to_proxy`` - properties must be False. - """ - - @property - def is_connected(self) -> bool: - """Whether the connection is actively connected to any origin (proxy or target)""" - - @property - def has_connected_to_proxy(self) -> bool: - """Whether the connection has successfully connected to its proxy. - This returns False if no proxy is in use. 
Used to determine whether - errors are coming from the proxy layer or from tunnelling to the target origin. - """ - - class BaseHTTPSConnection(BaseHTTPConnection, Protocol): - default_port: typing.ClassVar[int] - default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS] - - # Certificate verification methods - cert_reqs: int | str | None - assert_hostname: None | str | Literal[False] - assert_fingerprint: str | None - ssl_context: ssl.SSLContext | None - - # Trusted CAs - ca_certs: str | None - ca_cert_dir: str | None - ca_cert_data: None | str | bytes - - # TLS version - ssl_minimum_version: int | None - ssl_maximum_version: int | None - ssl_version: int | str | None # Deprecated - - # Client certificates - cert_file: str | None - key_file: str | None - key_password: str | None - - def __init__( - self, - host: str, - port: int | None = None, - *, - timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, - source_address: tuple[str, int] | None = None, - blocksize: int = 16384, - socket_options: _TYPE_SOCKET_OPTIONS | None = ..., - proxy: Url | None = None, - proxy_config: ProxyConfig | None = None, - cert_reqs: int | str | None = None, - assert_hostname: None | str | Literal[False] = None, - assert_fingerprint: str | None = None, - server_hostname: str | None = None, - ssl_context: ssl.SSLContext | None = None, - ca_certs: str | None = None, - ca_cert_dir: str | None = None, - ca_cert_data: None | str | bytes = None, - ssl_minimum_version: int | None = None, - ssl_maximum_version: int | None = None, - ssl_version: int | str | None = None, # Deprecated - cert_file: str | None = None, - key_file: str | None = None, - key_password: str | None = None, - ) -> None: - ... diff --git a/spaces/pustozerov/poc-handwriting-ocr/modules/data_generator/font.py b/spaces/pustozerov/poc-handwriting-ocr/modules/data_generator/font.py deleted file mode 100644 index dabf30df2bc08381160472033a8e128cf3548738..0000000000000000000000000000000000000000 --- a/spaces/pustozerov/poc-handwriting-ocr/modules/data_generator/font.py +++ /dev/null @@ -1,39 +0,0 @@ -import os - - -class Font: - def __init__(self, path, cyr_small, cyr_capit, digits, chars=None, size_coef=1): - if chars is None: - chars = [] - self.path = path - self.chars = chars - self.size_coef = size_coef - - if cyr_small: - for char in range(1072, 1104): - self.chars.append(chr(char)) - self.chars.append('ё') - if cyr_capit: - for char in range(1040, 1072): - self.chars.append(chr(char)) - if digits: - for i in range(0, 10): - self.chars.append(str(i)) - - def is_valid(self, string): - set(string) - for char in string: - if char not in self.chars: - return False - return True - - def __str__(self): - result = self.path + '\n' - for char in self.chars: - result += char + ' ' - return result - - -dir_name = os.path.dirname(__file__) -DIR = os.path.join(dir_name, '../../data/generated_dataset/handwriting_fonts/') -f = [] diff --git a/spaces/pycoming/bingo/src/components/chat-image.tsx b/spaces/pycoming/bingo/src/components/chat-image.tsx deleted file mode 100644 index 05ecc9771eada27a0f2d160bb01cba170d37bb09..0000000000000000000000000000000000000000 --- a/spaces/pycoming/bingo/src/components/chat-image.tsx +++ /dev/null @@ -1,170 +0,0 @@ -import { - useEffect, - useState, - useCallback, - ChangeEvent, - ClipboardEvent, - MouseEventHandler, - FormEvent, - useRef -} from "react" -import Image from 'next/image' -import PasteIcon from '@/assets/images/paste.svg' -import UploadIcon from '@/assets/images/upload.svg' -import CameraIcon from '@/assets/images/camera.svg' 
-import { useBing } from '@/lib/hooks/use-bing' -import { cn } from '@/lib/utils' - -interface ChatImageProps extends Pick, 'uploadImage'> {} - -const preventDefault: MouseEventHandler = (event) => { - event.nativeEvent.stopImmediatePropagation() -} - -const toBase64 = (file: File): Promise => new Promise((resolve, reject) => { - const reader = new FileReader() - reader.readAsDataURL(file) - reader.onload = () => resolve(reader.result as string) - reader.onerror = reject -}) - -export function ChatImage({ children, uploadImage }: React.PropsWithChildren) { - const videoRef = useRef(null) - const canvasRef = useRef(null) - const mediaStream = useRef() - const [panel, setPanel] = useState('none') - - const upload = useCallback((url: string) => { - if (url) { - uploadImage(url) - } - setPanel('none') - }, [panel]) - - const onUpload = useCallback(async (event: ChangeEvent) => { - const file = event.target.files?.[0] - if (file) { - const fileDataUrl = await toBase64(file) - if (fileDataUrl) { - upload(fileDataUrl) - } - } - }, []) - - const onPaste = useCallback((event: ClipboardEvent) => { - const pasteUrl = event.clipboardData.getData('text') ?? '' - upload(pasteUrl) - }, []) - - const onEnter = useCallback((event: FormEvent) => { - event.preventDefault() - event.stopPropagation() - // @ts-ignore - const inputUrl = event.target.elements.image.value - if (inputUrl) { - upload(inputUrl) - } - }, []) - - const openVideo: MouseEventHandler = async (event) => { - event.stopPropagation() - setPanel('camera-mode') - } - - const onCapture = () => { - if (canvasRef.current && videoRef.current) { - const canvas = canvasRef.current - canvas.width = videoRef.current!.videoWidth - canvas.height = videoRef.current!.videoHeight - canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height) - const cameraUrl = canvas.toDataURL('image/jpeg') - upload(cameraUrl) - } - } - - useEffect(() => { - const handleBlur = () => { - if (panel !== 'none') { - setPanel('none') - } - } - document.addEventListener('click', handleBlur) - return () => { - document.removeEventListener('click', handleBlur) - } - }, [panel]) - - useEffect(() => { - if (panel === 'camera-mode') { - navigator.mediaDevices.getUserMedia({ video: true, audio: false }) - .then(videoStream => { - mediaStream.current = videoStream - if (videoRef.current) { - videoRef.current.srcObject = videoStream - } - }) - } else { - if (mediaStream.current) { - mediaStream.current.getTracks().forEach(function(track) { - track.stop() - }) - mediaStream.current = undefined - } - } - }, [panel]) - - return ( -
    -
    panel === 'none' ? setPanel('normal') : setPanel('none')}>{children}
    -
    -
    -
    -

    添加图像

    -
    -
    - paste -
    - e.stopPropagation()} - /> -
    -
    -
    - - -
    -
    - {panel === 'camera-mode' &&
    -
    -
    -
    -
    -
    -
    -
    } -
    -
    - ) -} diff --git a/spaces/quidiaMuxgu/Expedit-SAM/AUTODESK.MAYA.V2011.SP1.WIN32-ISO Download Pc.md b/spaces/quidiaMuxgu/Expedit-SAM/AUTODESK.MAYA.V2011.SP1.WIN32-ISO Download Pc.md deleted file mode 100644 index ca5c95fb156db07d41039653e33b977ac9c66f79..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/AUTODESK.MAYA.V2011.SP1.WIN32-ISO Download Pc.md +++ /dev/null @@ -1,66 +0,0 @@ -

    AUTODESK.MAYA.V2011.SP1.WIN32-ISO Download Pc


    Download Zip ===== https://geags.com/2uCscL



    -
    -] AUTODESK.MAYA.V2011.SP1.WIN32-ISO Download Pc: - -Autodesk Maya: Beginners - -Autodesk Maya. It helps students to make 3D things. Learn the basis of the Autodesk Maya application and connect to 3D modeling and animation. - -Step 1: - -Download the Autodesk Maya 2011 iso file - -Click on Autodesk Maya.exe - -Step 2: - -Autodesk Maya Tutorial - -1) Open Autodesk Maya and start a new project. - -2) Let's play with the Maya's Maya window - -3) Let's get acquainted with the Viewer window. - -4) Let's edit a simple scene, create a new object and apply a material and texture. - -5) Let's open a file that we previously saved in the Maya's file system and add a simple tool to a new file in a new file in the Maya's scene. - -6) After completing this tutorial, you will be familiar with all the important aspects of Autodesk Maya and also the tools that you need to create your 3D model. - -7) We are currently testing the Maya functionality and issues in our computer. If you have some difficulties in Maya, please use the contact form to report your problems. - -Step 3: - -Autodesk Maya and the 'Create Scene' option - -After you opened Autodesk Maya, you can see that there is a 'create scene' option on the top menu bar on the Maya interface. - -Click on 'Create Scene' button. - -Step 4: - -Autodesk Maya opens a window for the editor. In this window you can see Maya's panels and windows. - -You can open multiple Maya files and views in the same time. - -Click on the 'Open Scene' button. - -Step 5: - -Here you can find all your current 3D objects in Maya. In the Maya editor there are 3D spaces and many 3D tools. - -Step 6: - -Now let's create a new 3D model and a new scene in Maya. - -Open the 'File' menu and click on the 'New' option. - -Step 7: - -The new scene is created. In the name of the scene you can see some important information about the scene you have just created. - -Name the scene 'test' 4fefd39f24
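      The same first steps can also be scripted instead of clicked through. The snippet below is a rough, illustrative sketch using Maya's Python module (maya.cmds); it assumes it is run from Maya's Script Editor, and the object and material names are only examples, not anything defined by the article above.

      ```python
      # Illustrative sketch (run inside Maya's Script Editor): new scene,
      # one simple object, a basic material, then save the scene as "test".
      import maya.cmds as cmds

      cmds.file(new=True, force=True)                      # start a new scene
      sphere = cmds.polySphere(name="myObject")[0]         # create a simple 3D object
      shader = cmds.shadingNode("lambert", asShader=True)  # create a basic material
      cmds.select(sphere)
      cmds.hyperShade(assign=shader)                       # apply the material to the selection
      cmds.file(rename="test.ma")
      cmds.file(save=True, type="mayaAscii")               # save the scene as "test"
      ```
      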
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/DMIFIT Tool And HPBQ138EXE [TOP].md b/spaces/quidiaMuxgu/Expedit-SAM/DMIFIT Tool And HPBQ138EXE [TOP].md deleted file mode 100644 index 1597f74051703ef2bf109b523d52215938ba0607..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/DMIFIT Tool And HPBQ138EXE [TOP].md +++ /dev/null @@ -1,51 +0,0 @@ -
    -``` -

    How to Use DMIFIT Tool And HPBQ138EXE to Fix HP Laptop BIOS Issues

    - -

    If you have an HP laptop that shows an error message like "Product Information Not Valid" or "System Board (00A)" when you boot it up, you may need to use the DMIFIT tool and HPBQ138EXE to update the system board information. This article will explain what these tools are, how to get them, and how to use them.

    - -

    What are DMIFIT Tool And HPBQ138EXE?

    - -

    DMIFIT stands for Desktop Management Interface (DMI) Firmware Interface Tool. It is a utility that allows you to modify the DMI information stored in the BIOS of your HP laptop. DMI information includes things like product name, serial number, SKU, family, and UUID. Sometimes, this information may get corrupted or erased due to a faulty BIOS update, a motherboard replacement, or other reasons. This can cause your laptop to display an error message or fail to boot properly.
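      To make that list of DMI fields concrete: on a Linux machine the same information is exposed read-only under /sys/class/dmi/id, so you can inspect what it looks like before touching the BIOS. The sketch below is illustrative only, assumes a standard Linux sysfs layout, and is unrelated to the DMIFIT tool itself; it reads values and never writes anything.

      ```python
      # Illustrative sketch: read a few DMI fields on Linux (read-only).
      # Assumes the standard /sys/class/dmi/id layout; some fields need root,
      # and product_sku may be missing on older kernels.
      from pathlib import Path

      DMI_DIR = Path("/sys/class/dmi/id")

      for field in ("sys_vendor", "product_name", "product_serial", "product_uuid", "product_sku"):
          path = DMI_DIR / field
          try:
              print(f"{field}: {path.read_text().strip()}")
          except (FileNotFoundError, PermissionError):
              print(f"{field}: <not available>")
      ```
      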

    -

    DMIFIT Tool And HPBQ138EXE


    Downloadhttps://geags.com/2uCqx7



    - -

    HPBQ138EXE is a specific version of the DMIFIT tool that works with certain models of HP laptops. It is a DOS-based program that you need to run from a bootable USB drive or CD. You can download it from the HP support community forum or other online sources.

    - -

    How to Get DMIFIT Tool And HPBQ138EXE?

    - -

    To get the DMIFIT tool and HPBQ138EXE, you need to follow these steps:

    - -
      -
    1. Create a bootable USB drive or CD with DOS. You can use a free program like Rufus or UNetbootin to do this.
    2. -
    3. Download the HPBQ138.zip file from the HP support community forum or other online sources. You can find the link in the references section below.
    4. -
    5. Extract the zip file and copy the HPBQ138.exe file to the root directory of your bootable USB drive or CD.
    6. -
    7. Insert the bootable USB drive or CD into your HP laptop and restart it.
    8. -
    9. Press F9 or Esc repeatedly during startup to enter the boot menu.
    10. -
    11. Select your bootable USB drive or CD from the list and press Enter.
    12. -
    13. Once in the DOS environment, type: "HPBQ138.exe" without the quotes. This should start the DMIFIT utility.
    14. -
    - -

    How to Use DMIFIT Tool And HPBQ138EXE?

    - -

    To use the DMIFIT tool and HPBQ138EXE, you need to follow these steps:

    - -
      -
    1. Once the DMIFIT utility starts, you will see a menu with several options. Select option 1: "Enter Serial Number".
    2. -
    3. Type in your laptop's serial number as shown on the back of your laptop or on a sticker under the battery. Press Enter.
    4. -
    5. Select option 2: "Enter Notebook Model".
    6. -
    7. Type in your laptop's model number as shown on the back of your laptop or on a sticker under the battery. Press Enter.
    8. -
    9. Select option 3: "Enter GUID Number".
    10. -
      11. Type in a random 32-digit hexadecimal number. You can use an online generator like this one: https://www.guidgenerator.com/, or the short script sketched after this list. Press Enter.
      
    12. -
    13. Select option 4: "Enter UUID Number".
    14. -
    15. Select option 1: "Generate UUID". This will automatically create a unique 32-digit hexadecimal number for your laptop. Press Enter.
    16. -
    17. Select option 5: "Write Current MPM Values".
    18. -
    19. Select option 1: "Write All DMI Values". This will write all the information you entered to your laptop's BIOS. Press Enter.
    20. -
    21. Select option 6: "Exit".
    22. -
    23. Restart your laptop and check if the error message is gone.
    24. -
    - -
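      For reference, the 32-digit hexadecimal values asked for in the GUID and UUID steps above can also be generated offline. The snippet below is a minimal, illustrative sketch using only the Python standard library; it is not part of the DMIFIT tool, and the variable names are arbitrary.

      ```python
      # Illustrative only: generate 32-digit hexadecimal strings like the ones
      # the DMIFIT utility asks for in the GUID/UUID steps (not an HP tool).
      import uuid

      guid_value = uuid.uuid4().hex.upper()   # 32 random hex digits
      uuid_value = uuid.uuid4().hex.upper()   # a second, independent value

      print("GUID:", guid_value)
      print("UUID:", uuid_value)
      ```
      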

    Conclusion

    - -

      In this article, you learned how to use the DMIFIT tool and HPBQ138EXE to fix HP laptop BIOS issues. You also learned what these tools are, how to get them, and how to use them. By following these steps, you should be able to clear the "Product Information Not Valid" or "System Board (00A)" error and get your HP laptop booting normally again.
      

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Hidmaker Fs [((FULL)) Full Version] Downl.md b/spaces/quidiaMuxgu/Expedit-SAM/Hidmaker Fs [((FULL)) Full Version] Downl.md deleted file mode 100644 index 9ae15c4cb215301a50bb5655ac7aa441c0757d9f..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Hidmaker Fs [((FULL)) Full Version] Downl.md +++ /dev/null @@ -1,32 +0,0 @@ -

    Hidmaker Fs [FULL Version] Downl


    Download Zip ····· https://geags.com/2uCqbj



    - -hex files (Support for Windows XP, Vista, 7, 8, 8.1 and 10 - Full Speed only) and boot loader. - -Windows Tool-- Create your own custom, true custom windows app. Save the Hex file. It does work for all Windows Platforms. - -1.11 -- 8/25/2015. Added "USB with Kernel", but you have to manually put the INF into the Windows folder. Worked for USB Full Speed. - -1.10 -- 8/24/2015. Add "USB with SFS", but you have to manually put the INF into the Windows folder. Worked for USB Full Speed. - -1.9 - 8/23/2015. Added compatibility with Windows 7. Boot Loader .hex file now supports Windows 7. - -1.8 - 8/20/2015. Added Windows 8 Compatibility. "USB with USB" now works. "USB with SFS" now works. "USB with WinLoader" now works. "USB with Kernel" now works. "USB with USB Loader" now works. Support for Windows 10. Changed the Hex Converter to now support Windows 10 Loader. - -1.7 - 8/18/2015. Added support for Windows 8 boot loader and Windows 8 FAT32 boot loader. - -1.6 - 8/17/2015. Added support for Windows 8.1 and Windows 10 FAT32 boot loader. Now requires "USB with SFS" to work. - -1.5 - 8/15/2015. Added support for Windows 8.1 and Windows 10. Now requires "USB with SFS" to work. - -1.4 - 8/12/2015. Added support for Windows 8 boot loader and Windows 8 FAT32 boot loader. - -1.3 - 8/11/2015. Added support for Windows 8 boot loader and Windows 8 FAT32 boot loader. - -1.2 - 8/7/2015. Added support for Windows 8 FAT32 boot loader and Windows 8 FAT32 boot loader. Also, the interface now allows you to read all the bootloaders and the tables out of the flash memory. - -1.1 - 7/27/2015. Added support for Windows 8.1 boot loader and Windows 8 FAT32 boot loader. - -1.0 - 7/26/2015. First public version 4fefd39f24
    -
    -
    -

    diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/tools/rvc_for_realtime.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/tools/rvc_for_realtime.py deleted file mode 100644 index cbdb5cdc2ed77b6c5585e70a86fe39decb6a3e7c..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/tools/rvc_for_realtime.py +++ /dev/null @@ -1,380 +0,0 @@ -import os -import sys -import traceback -import logging - -logger = logging.getLogger(__name__) - -from time import time as ttime - -import fairseq -import faiss -import numpy as np -import parselmouth -import pyworld -import scipy.signal as signal -import torch -import torch.nn.functional as F -import torchcrepe - -from lib.infer.infer_libs.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) - -now_dir = os.getcwd() -sys.path.append(now_dir) -from multiprocessing import Manager as M - -from assets.configs.config import Config - -config = Config() - -mm = M() -if config.dml == True: - - def forward_dml(ctx, x, scale): - ctx.scale = scale - res = x.clone().detach() - return res - - fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml - - -# config.device=torch.device("cpu")########强制cpu测试 -# config.is_half=False########强制cpu测试 -class RVC: - def __init__( - self, - key, - pth_path, - index_path, - index_rate, - n_cpu, - inp_q, - opt_q, - device, - last_rvc=None, - ) -> None: - """ - 初始化 - """ - try: - global config - self.inp_q = inp_q - self.opt_q = opt_q - # device="cpu"########强制cpu测试 - self.device = device - self.f0_up_key = key - self.time_step = 160 / 16000 * 1000 - self.f0_min = 50 - self.f0_max = 1100 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - self.sr = 16000 - self.window = 160 - self.n_cpu = n_cpu - if index_rate != 0: - self.index = faiss.read_index(index_path) - self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - logger.info("Index search enabled") - self.pth_path = pth_path - self.index_path = index_path - self.index_rate = index_rate - - if last_rvc is None: - models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( - ["assets/hubert/hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - self.model = hubert_model - else: - self.model = last_rvc.model - - if last_rvc is None or last_rvc.pth_path != self.pth_path: - cpt = torch.load(self.pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - logger.debug(self.net_g.load_state_dict(cpt["weight"], strict=False)) - self.net_g.eval().to(device) - # print(2333333333,device,config.device,self.device)#net_g是device,hubert是config.device - if config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = 
self.net_g.float() - self.is_half = config.is_half - else: - self.tgt_sr = last_rvc.tgt_sr - self.if_f0 = last_rvc.if_f0 - self.version = last_rvc.version - self.net_g = last_rvc.net_g - self.is_half = last_rvc.is_half - - if last_rvc is not None and hasattr(last_rvc, "model_rmvpe"): - self.model_rmvpe = last_rvc.model_rmvpe - except: - logger.warn(traceback.format_exc()) - - def change_key(self, new_key): - self.f0_up_key = new_key - - def change_index_rate(self, new_index_rate): - if new_index_rate != 0 and self.index_rate == 0: - self.index = faiss.read_index(self.index_path) - self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - logger.info("Index search enabled") - self.index_rate = new_index_rate - - def get_f0_post(self, f0): - f0_min = self.f0_min - f0_max = self.f0_max - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int32) - return f0_coarse, f0bak - - def get_f0(self, x, f0_up_key, n_cpu, method="harvest"): - n_cpu = int(n_cpu) - if method == "crepe": - return self.get_f0_crepe(x, f0_up_key) - if method == "rmvpe": - return self.get_f0_rmvpe(x, f0_up_key) - if method == "pm": - p_len = x.shape[0] // 160 + 1 - f0 = ( - parselmouth.Sound(x, 16000) - .to_pitch_ac( - time_step=0.01, - voicing_threshold=0.6, - pitch_floor=50, - pitch_ceiling=1100, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - # print(pad_size, p_len - len(f0) - pad_size) - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - if n_cpu == 1: - f0, t = pyworld.harvest( - x.astype(np.double), - fs=16000, - f0_ceil=1100, - f0_floor=50, - frame_period=10, - ) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - f0bak = np.zeros(x.shape[0] // 160 + 1, dtype=np.float64) - length = len(x) - part_length = 160 * ((length // 160 - 1) // n_cpu + 1) - n_cpu = (length // 160 - 1) // (part_length // 160) + 1 - ts = ttime() - res_f0 = mm.dict() - for idx in range(n_cpu): - tail = part_length * (idx + 1) + 320 - if idx == 0: - self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts)) - else: - self.inp_q.put( - (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts) - ) - while 1: - res_ts = self.opt_q.get() - if res_ts == ts: - break - f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])] - for idx, f0 in enumerate(f0s): - if idx == 0: - f0 = f0[:-3] - elif idx != n_cpu - 1: - f0 = f0[2:-3] - else: - f0 = f0[2:] - f0bak[ - part_length * idx // 160 : part_length * idx // 160 + f0.shape[0] - ] = f0 - f0bak = signal.medfilt(f0bak, 3) - f0bak *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0bak) - - def get_f0_crepe(self, x, f0_up_key): - if "privateuseone" in str(self.device): ###不支持dml,cpu又太慢用不成,拿pm顶替 - return self.get_f0(x, f0_up_key, 1, "pm") - audio = torch.tensor(np.copy(x))[None].float() - # print("using crepe,device:%s"%self.device) - f0, pd = torchcrepe.predict( - audio, - self.sr, - 160, - self.f0_min, - self.f0_max, - "full", - batch_size=512, - # device=self.device if self.device.type!="privateuseone" else "cpu",###crepe不用半精度全部是全精度所以不愁###cpu延迟高到没法用 - device=self.device, - 
return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - - def get_f0_rmvpe(self, x, f0_up_key): - if hasattr(self, "model_rmvpe") == False: - from lib.infer.infer_libs.rmvpe import RMVPE - - logger.info("Loading rmvpe model") - self.model_rmvpe = RMVPE( - # "rmvpe.pt", is_half=self.is_half if self.device.type!="privateuseone" else False, device=self.device if self.device.type!="privateuseone"else "cpu"####dml时强制对rmvpe用cpu跑 - # "rmvpe.pt", is_half=False, device=self.device####dml配置 - # "rmvpe.pt", is_half=False, device="cpu"####锁定cpu配置 - "%s/rmvpe.pt" % os.environ["rmvpe_root"], - is_half=self.is_half, - device=self.device, ####正常逻辑 - ) - # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - - def infer( - self, - feats: torch.Tensor, - indata: np.ndarray, - block_frame_16k, - rate, - cache_pitch, - cache_pitchf, - f0method, - ) -> np.ndarray: - feats = feats.view(1, -1) - if config.is_half: - feats = feats.half() - else: - feats = feats.float() - feats = feats.to(self.device) - t1 = ttime() - with torch.no_grad(): - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - inputs = { - "source": feats, - "padding_mask": padding_mask, - "output_layer": 9 if self.version == "v1" else 12, - } - logits = self.model.extract_features(**inputs) - feats = ( - self.model.final_proj(logits[0]) if self.version == "v1" else logits[0] - ) - feats = F.pad(feats, (0, 0, 1, 0)) - t2 = ttime() - try: - if hasattr(self, "index") and self.index_rate != 0: - leng_replace_head = int(rate * feats[0].shape[0]) - npy = feats[0][-leng_replace_head:].cpu().numpy().astype("float32") - score, ix = self.index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - if config.is_half: - npy = npy.astype("float16") - feats[0][-leng_replace_head:] = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate - + (1 - self.index_rate) * feats[0][-leng_replace_head:] - ) - else: - logger.warn("Index search FAILED or disabled") - except: - traceback.print_exc() - logger.warn("Index search FAILED") - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t3 = ttime() - if self.if_f0 == 1: - pitch, pitchf = self.get_f0(indata, self.f0_up_key, self.n_cpu, f0method) - start_frame = block_frame_16k // 160 - end_frame = len(cache_pitch) - (pitch.shape[0] - 4) + start_frame - cache_pitch[:] = np.append(cache_pitch[start_frame:end_frame], pitch[3:-1]) - cache_pitchf[:] = np.append( - cache_pitchf[start_frame:end_frame], pitchf[3:-1] - ) - p_len = min(feats.shape[1], 13000, cache_pitch.shape[0]) - else: - cache_pitch, cache_pitchf = None, None - p_len = min(feats.shape[1], 13000) - t4 = ttime() - feats = feats[:, :p_len, :] - if self.if_f0 == 1: - cache_pitch = cache_pitch[:p_len] - cache_pitchf = cache_pitchf[:p_len] - cache_pitch = torch.LongTensor(cache_pitch).unsqueeze(0).to(self.device) - cache_pitchf = torch.FloatTensor(cache_pitchf).unsqueeze(0).to(self.device) - p_len = torch.LongTensor([p_len]).to(self.device) - ii = 0 # sid - sid = torch.LongTensor([ii]).to(self.device) - with torch.no_grad(): - if self.if_f0 == 1: - # 
print(12222222222,feats.device,p_len.device,cache_pitch.device,cache_pitchf.device,sid.device,rate2) - infered_audio = ( - self.net_g.infer( - feats, p_len, cache_pitch, cache_pitchf, sid, rate - )[0][0, 0] - .data - .float() - ) - else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid, rate)[0][0, 0] - .data - .float() - ) - t5 = ttime() - logger.info( - "Spent time: fea = %.2fs, index = %.2fs, f0 = %.2fs, model = %.2fs", - t2 - t1, - t3 - t2, - t4 - t3, - t5 - t4, - ) - return infered_audio \ No newline at end of file diff --git a/spaces/r3gm/Fast_Stable_diffusion_CPU/easy_run.py b/spaces/r3gm/Fast_Stable_diffusion_CPU/easy_run.py deleted file mode 100644 index 93fdd0485e8844a40933367ccf10edd4bf4c92f1..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Fast_Stable_diffusion_CPU/easy_run.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python3 -# import pipeline and scheduler from https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7/ -from lcm_pipeline import LatentConsistencyModelPipeline -from lcm_scheduler import LCMScheduler -import hf_image_uploader as hiu -import torch - -scheduler = LCMScheduler.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", subfolder="scheduler") - -pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", scheduler=scheduler) -pipe.to("cuda", dtype=torch.float16) - -prompt = "a red horse" -images = pipe(prompt=prompt, guidance_scale=8.0, num_inference_steps=4, lcm_origin_steps=50, output_type="pil").images - -for image in images: - hiu.upload(image, "patrickvonplaten/images") diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Bibliocad Vip Account 16 Everything You Need to Know About This Amazing Resource.md b/spaces/raedeXanto/academic-chatgpt-beta/Bibliocad Vip Account 16 Everything You Need to Know About This Amazing Resource.md deleted file mode 100644 index 90c72a00b82706f520d76d66bb7bb3c6dc8b5614..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Bibliocad Vip Account 16 Everything You Need to Know About This Amazing Resource.md +++ /dev/null @@ -1,236 +0,0 @@ - -

    Bibliocad Vip Account 16: What Is It and Why You Need It

    -

    If you are a professional or a student in the fields of architecture, engineering, design, or construction, you probably know how important it is to have access to a large and diverse collection of CAD files for your projects. CAD files are digital representations of physical objects, such as buildings, furniture, vehicles, machines, etc., that can be used for designing, modeling, drafting, or rendering purposes.

    -

    One of the best online sources of CAD files is Bibliocad, a website that offers more than 150,000 CAD files in various formats, such as DWG, DXF, PDF, RVT, SKP, etc. Bibliocad has been around since 1999 and has grown to become one of the most popular and trusted online CAD libraries in the world. Bibliocad has files for every category and subcategory you can think of, such as architecture, urbanism, landscaping, civil engineering, mechanical engineering, electrical engineering, plumbing, HVAC, furniture, interior design, vehicles, etc.

    -

    Bibliocad Vip Account 16


    DOWNLOADhttps://tinourl.com/2uKZ8A



    -

    But what if you want more than just access to thousands of CAD files? What if you want to enjoy exclusive features and benefits that will make your work easier and faster? What if you want to be part of a community of professionals and enthusiasts who share their knowledge and experience with each other? That's where Bibliocad Vip Account 16 comes in.

    -

    What is a Vip account and what are its benefits?

    -

    A Vip account is a premium subscription service that gives you unlimited access to the entire Bibliocad library, including more than 50,000 Vip files that are not available for free users. These Vip files are high-quality CAD files that have been carefully selected and verified by the Bibliocad team. They are also updated regularly to ensure that they meet the latest standards and requirements of the industry.

    -

    With a Vip account, you can download as many files as you want without any restrictions or limitations. You can also request custom files from the Bibliocad team if you can't find what you need in the library. Moreover, you can enjoy faster download speeds and better file formats than free users.

    -

    But that's not all. A Vip account also gives you the opportunity to upload and share your own CAD files with the Bibliocad community. You can showcase your work, get feedback from other users, and earn points that you can redeem for prizes or discounts. You can also participate in contests and challenges that are organized by Bibliocad every month.

    -

    By becoming a Vip user, you also support the Bibliocad project and help it grow and improve. You contribute to the maintenance and development of the website and its services. You also help other users who rely on Bibliocad for their projects.

    -

    How to get a Vip account 16 and what are its costs?

    -

    Getting a Vip account 16 is very easy and convenient. You just need to visit this page and choose the plan that suits your needs and budget. You can choose from three options:

    -
      -
    • Monthly plan: This plan costs $14.99 per month and gives you access to all the Vip features for one month. You can cancel anytime before the next billing cycle.
    • -
• Annual plan: This plan costs $99 per year and gives you access to all the Vip features for one year. You save 45% compared to the monthly plan (a quick arithmetic check of this figure follows the list). You can cancel anytime before the next billing cycle.
    • -
    • Lifetime plan: This plan costs $199 one-time payment and gives you access to all the Vip features for life. You save 87% compared to the monthly plan. This is the best value option if you plan to use Bibliocad for a long time.
    • -
    -
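A quick arithmetic check of the annual figure, using the prices listed above: twelve monthly payments come to 12 × $14.99 = $179.88, while the annual plan costs $99, so the annual plan costs about 55% of the monthly route (99 / 179.88 ≈ 0.55) — a saving of roughly 45%, which matches the figure quoted for the annual plan. The 87% quoted for the lifetime plan depends on how many years you would otherwise keep paying month by month, so it is best read as a long-term estimate rather than a fixed discount.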

    Once you choose your plan, you can pay with any major credit card or PayPal. You will receive an email confirmation with your login details and instructions on how to activate your account. You can start using your Vip account immediately after payment.

    -

    How to Use Bibliocad Vip Account 16 Effectively

    -

    How to access and download files from the Vip library

    -

    To access and download files from the Vip library, you need to log in with your email and password on Bibliocad's homepage. Once you are logged in, you will see a green badge next to your name that indicates that you are a Vip user.

    -


    -

    You can browse through the categories and subcategories on the left sidebar or use the search bar on the top right corner to find the files you need. You can also filter by file format, date uploaded, rating, popularity, etc.

    -

    When you find a file that interests you, click on it to see more details about it. You will see a preview image, a description, a list of keywords, a file size indicator, a download button, and other information such as comments or ratings from other users.

    -

    To download a file, simply click on the green download button. You will be redirected to a page where you can choose between two options:

    -
      -
    • Download now: This option allows you to download the file immediately without waiting or entering any captcha code.
    • -
    • Add to cart: This option allows you to add multiple files to your cart and download them later in bulk.
    • -
    -

    You can also add files to your favorites list by clicking on the heart icon next to them. This way, you can easily access them later from your profile page.

    -

    How to upload and share your own files with the Vip community

    -

    If you want to upload and share your own CAD files with the Bibliocad community, you need to go to this page and follow these steps:

    -
      -
    1. Select your file: Click on the browse button and choose the file from your computer or drag and drop it into the box. The file must be in one of these formats: DWG (AutoCAD), DXF (AutoCAD), PDF (Adobe), RVT (Revit), SKP (SketchUp), DWF (AutoCAD), DGN (MicroStation), IFC (Industry Foundation Classes), RFA (Revit Family), GSM (ArchiCAD), PLN (ArchiCAD), MAX (3ds Max), OBJ (Wavefront), STL (Stereolithography), KMZ (Google Earth), JPG (Image), PNG (Image).
    2. -
    3. Add details: Fill in the required fields such as title, description, keywords, category, subcategory, language, and license. You can also add optional fields such as author, website, email, phone, etc. Make sure to provide accurate and relevant information about your file. This will help other users find and use it properly.
4. -
5. Upload your file: once you have chosen your file and filled in its details, confirm the upload. Your file will be submitted for review by the Bibliocad team. They will check if it meets the quality and originality standards of the website. If it does, it will be published and available for download by other users. If it doesn't, it will be rejected and you will receive an email with the reasons why.
6. -
    -

    By uploading and sharing your files, you can earn points that you can redeem for prizes or discounts on your Vip subscription. You can also receive comments and ratings from other users who download your files. This will help you improve your skills and reputation as a CAD professional.

    -

    How to manage your account settings and preferences

    -

    To manage your account settings and preferences, you need to go to this page and log in with your email and password. You will see a dashboard with various options to customize your profile and account.

    -

    Some of the things you can do are:

    -
      -
    • Edit your profile: You can change your name, email, password, country, language, avatar, bio, etc.
    • -
    • View your activity: You can see your download history, upload history, favorites list, points balance, etc.
    • -
    • Manage your subscription: You can see your current plan, renewal date, payment method, etc. You can also cancel or change your plan anytime.
    • -
    • Contact support: You can send a message to the Bibliocad team if you have any questions, suggestions, complaints, or feedback.
    • -
    -

    You can also access these options from the menu icon on the top right corner of any page on Bibliocad.

    -

    Tips and Tricks for Bibliocad Vip Account 16 Users

    -

    How to find and use the best files for your projects

    -

    With more than 150,000 CAD files available on Bibliocad, finding and using the best ones for your projects can be challenging. Here are some tips and tricks to help you out:

    -
      -
• Use the search bar: The search bar is the easiest and fastest way to find what you need. You can type keywords related to your project or file format. You can also use quotation marks to search for exact phrases or use operators such as AND, OR, NOT to refine your search (an example query follows this list).
    • -
    • Use the filters: The filters allow you to narrow down your results by category, subcategory, file format, date uploaded, rating, popularity, etc. You can also sort your results by relevance, newest, oldest, most downloaded, most rated, etc.
    • -
    • Use the preview image: The preview image gives you a glimpse of what the file looks like before downloading it. You can zoom in or out to see more details or click on it to see a larger version.
    • -
    • Use the description: The description provides more information about the file such as its dimensions, scale, units, layers, colors, etc. You can also see the keywords that the uploader used to tag the file. This will help you understand the file's content and purpose.
    • -
    • Use the comments and ratings: The comments and ratings from other users who downloaded the file can give you an idea of its quality and usefulness. You can also leave your own comment or rating after downloading a file to share your feedback with the uploader and other users.
    • -
    -
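As a concrete illustration of the search-bar tip above (the query below is hypothetical — the exact operator syntax depends on Bibliocad's search engine): typing "spiral staircase" AND dwg NOT sketchup asks for files whose listing contains the exact phrase "spiral staircase", narrows the results to entries that mention dwg, and excludes anything tagged sketchup.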

    How to optimize your files for faster downloads and better quality

    -

    If you want to optimize your files for faster downloads and better quality, here are some tips and tricks to follow:

    -
      -
• Reduce file size: The smaller the file size, the faster it will download. You can reduce file size by deleting unnecessary elements such as blocks, hatches, images, and text. You can also use the purge, audit, or overkill commands in AutoCAD to clean up your file. Another option is to use file compression software such as WinZip or 7-Zip to zip your file before uploading it (a short script showing this step follows the list).
    • -
    • Increase file resolution: The higher the file resolution, the better it will look. You can increase file resolution by using higher quality settings when saving or exporting your file. For example, if you are saving a PDF file, you can choose a higher DPI (dots per inch) value or a higher quality preset. If you are saving a JPG or PNG image, you can choose a higher pixel dimension or a lower compression level.
    • -
    • Avoid errors and issues: Errors and issues in your file can cause problems when downloading or opening it. You can avoid errors and issues by following these steps:
        -
      1. Check compatibility: Make sure that your file is compatible with the software that you or other users will use to open it. For example, if you are uploading a DWG file, make sure that it is saved in a version that is supported by AutoCAD or other CAD software. If you are uploading a PDF file, make sure that it is not password-protected or encrypted.
      2. -
      3. Check integrity: Make sure that your file is not corrupted or damaged. You can check integrity by opening your file in your software and looking for any errors or warnings. You can also use a file verification tool such as Xanadu to check if your DWG file is valid or not.
      4. -
      5. Add metadata: Metadata is information that describes your file such as title, author, description, keywords, etc. Adding metadata to your file will help other users find and use it properly. You can add metadata to your file using your software's properties or options menu or using a metadata editor tool such as Total PDF Printer.
      6. -
    • -
    -
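To illustrate the compression tip from the list above, here is a minimal Python sketch of the zip-before-upload step. It relies only on the standard library; the file name is hypothetical and stands in for whatever CAD file you want to shrink before sending it to Bibliocad.

```python
import os
import zipfile


def zip_for_upload(path: str) -> str:
    """Compress a single CAD file (e.g. a .dwg) into a .zip placed next to it."""
    archive = path + ".zip"
    with zipfile.ZipFile(archive, "w", compression=zipfile.ZIP_DEFLATED) as zf:
        # Store only the file name inside the archive, not the full local path.
        zf.write(path, arcname=os.path.basename(path))
    before = os.path.getsize(path)
    after = os.path.getsize(archive)
    print(f"{path}: {before / 1e6:.2f} MB -> {after / 1e6:.2f} MB after zipping")
    return archive


if __name__ == "__main__":
    # Hypothetical file name, used purely for illustration.
    zip_for_upload("floor_plan.dwg")
```

How much the zip step actually helps varies: DWG files are already partly compressed, so the biggest savings usually come from purging unused blocks and layers first and zipping afterwards.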

    How to avoid common errors and issues with Bibliocad Vip Account 16

    -

    If you encounter any errors or issues with Bibliocad Vip Account 16, here are some tips and tricks to solve them:

    -
      -
    • Login problems: If you have trouble logging in to your Vip account, make sure that you are using the correct email and password that you registered with. If you forgot your password, you can request a reset link by clicking on this page. If you still can't log in, contact support at support@bibliocad.com.
    • -
    • Download problems: If you have trouble downloading files from the Vip library, make sure that you have enough space on your device or storage media. If you get an error message such as "File not found" or "Access denied", try refreshing the page or clearing your browser cache. If you still can't download files, contact support at support@bibliocad.com.
    • -
    • Upload problems: If you have trouble uploading files to the Vip library, make sure that your file meets the requirements of format, size, quality, and originality. If you get an error message such as "File too large" or "File not supported", try reducing your file size or changing your file format. If you still can't upload files, contact support at support@bibliocad.com.
    • -
    • Billing problems: If you have trouble with your payment method or subscription plan, make sure that you have enough funds on your credit card or PayPal account. If you get an error message such as "Payment declined" or "Subscription expired", try updating your payment information or renewing your plan. If you still have billing problems, contact support at support@bibliocad.com.
    • -
    -

    Bibliocad Vip Account 16 vs Other Online CAD Libraries

    -

    How Bibliocad compares to other popular online CAD libraries

    -

    Bibliocad is not the only online CAD library available on the internet. There are many other websites that offer similar services and features. Some of the most popular ones are:

    - - - - - - - - - - -Stereolithography), STEP (Standard for the Exchange of Product Data), IGES (Initial Graphics Exchange Specification), etc. GrabCAD also offers a cloud-based collaboration platform called GrabCAD Workbench that allows users to manage and share their CAD projects with others. - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameDescriptionPricingMain featuresMain drawbacks
    GrabCADFree- Large and diverse collection of CAD files - User-friendly interface and search engine - Cloud-based collaboration platform - Online community and forums- Limited file formats and categories - Variable file quality and accuracy - No premium or exclusive features
    CGTraderA website that offers more than 1 million 3D models in various formats such as OBJ (Wavefront), FBX (Autodesk), 3DS (3D Studio), etc. CGTrader also offers a marketplace where users can buy and sell their 3D models for various purposes such as gaming, animation, printing, etc.Free and paid- Large and diverse collection of 3D models - User-friendly interface and search engine - Marketplace for buying and selling 3D models - Online community and contests- Limited file formats and categories - Variable file quality and accuracy - No premium or exclusive features
    TracePartsA website that offers more than 100 million CAD files in various formats such as DWG (AutoCAD), DXF (AutoCAD), STEP (Standard for the Exchange of Product Data), IGES (Initial Graphics Exchange Specification), etc. TraceParts also offers a catalog of industrial products from more than 800 suppliers and manufacturers.Free and paid- Large and diverse collection of CAD files - User-friendly interface and search engine - Catalog of industrial products - Online community and newsletters- Limited file formats and categories - Variable file quality and accuracy - No premium or exclusive features
    CAD Blocks FreeA website that offers more than 10,000 free CAD files in various formats such as DWG (AutoCAD), DXF (AutoCAD), RVT (Revit), SKP (SketchUp), etc. CAD Blocks Free also offers a blog with tips and tutorials on CAD software and design.Free- Large and diverse collection of CAD files - User-friendly interface and search engine - Blog with tips and tutorials - Online community and forums- Limited file formats and categories - Variable file quality and accuracy - No premium or exclusive features
    -

    What are the advantages and disadvantages of each option?

    -

    Each online CAD library has its own advantages and disadvantages depending on your needs and preferences. Here are some of the main pros and cons of each option:

    - - - - - - - - - - - - - - - - - - - - - - - -- User-friendly interface and search engine - Catalog of industrial products - Online community and newsletters - - - - - - - -
    NameAdvantagesDisadvantages
    Bibliocad Vip Account 16- Unlimited access to more than 150,000 CAD files including 50,000 Vip files - Exclusive features and benefits such as faster downloads, custom files, contests, etc. - Opportunity to upload and share your own files with the community - Support for the Bibliocad project- Requires a paid subscription plan - Not all file formats and categories are available - Some files may be outdated or inaccurate
    GrabCAD- Free access to more than 4 million CAD files - User-friendly interface and search engine - Cloud-based collaboration platform - Online community and forums- Limited file formats and categories - Variable file quality and accuracy - No premium or exclusive features
    CGTrader- Free and paid access to more than 1 million 3D models - User-friendly interface and search engine - Marketplace for buying and selling 3D models - Online community and contests- Limited file formats and categories - Variable file quality and accuracy - No premium or exclusive features
    TraceParts- Limited file formats and categories - Variable file quality and accuracy - No premium or exclusive features
    CAD Blocks Free- Free access to more than 10,000 CAD files - User-friendly interface and search engine - Blog with tips and tutorials - Online community and forums- Limited file formats and categories - Variable file quality and accuracy - No premium or exclusive features
    -

    How to choose the best online CAD library for your needs?

    -

    To choose the best online CAD library for your needs, you need to consider several factors such as:

    -
      -
    • Your budget: How much are you willing to spend on an online CAD library? Do you prefer a free or a paid service? Do you want a one-time payment or a recurring subscription?
    • -
    • Your project: What kind of CAD files do you need for your project? What file formats and categories are you looking for? How many files do you need to download or upload?
    • -
    • Your preference: What features and benefits do you value the most in an online CAD library? Do you want exclusive access, faster downloads, custom files, etc.? Do you want to be part of a community, participate in contests, etc.?
    • -
    • Your experience: How familiar are you with CAD software and design? How comfortable are you with using online services and platforms? How much support and guidance do you need?
    • -
    -

    Based on these factors, you can compare and contrast the different options available and choose the one that best suits your needs and preferences. You can also try out different options before making a final decision.

    -

    Conclusion

    -

    Bibliocad Vip Account 16 is a premium subscription service that gives you unlimited access to more than 150,000 CAD files, including 50,000 Vip files that are not available for free users. It also gives you exclusive features and benefits such as faster downloads, custom files, contests, etc. It also gives you the opportunity to upload and share your own files with the Bibliocad community. By becoming a Vip user, you also support the Bibliocad project and help it grow and improve.

    -

    Bibliocad Vip Account 16 is one of the best online CAD libraries available on the internet. It offers a large and diverse collection of CAD files for every category and subcategory you can think of. It also offers a user-friendly interface and search engine that makes it easy to find and use the files you need. It also offers a blog with tips and tutorials on CAD software and design.

    -

    If you are a professional or a student in the fields of architecture, engineering, design, or construction, Bibliocad Vip Account 16 is a valuable resource that will help you with your projects. It will save you time, money, and effort by providing you with high-quality CAD files that you can download or upload anytime. It will also enhance your skills and reputation by allowing you to showcase your work, get feedback from other users, and participate in contests.

    -

    If you want to get started with Bibliocad Vip Account 16 today, all you need to do is visit this page and choose the plan that suits your needs and budget. You can pay with any major credit card or PayPal. You will receive an email confirmation with your login details and instructions on how to activate your account. You can start using your Vip account immediately after payment.

    -

    Don't miss this opportunity to access one of the best online CAD libraries in the world. Join Bibliocad Vip Account 16 today and enjoy all the benefits it has to offer.

    -

    FAQs

    -

    What is Bibliocad?

    -

    Bibliocad is a website that offers more than 150,000 CAD files in various formats such as DWG (AutoCAD), DXF (AutoCAD), PDF (Adobe), RVT (Revit), SKP (SketchUp), etc. Bibliocad has files for every category and subcategory such as architecture, urbanism, landscaping, civil engineering, mechanical engineering, electrical engineering, plumbing, HVAC, furniture, interior design, vehicles, etc.

    -

    What is a Vip account?

    -

    A Vip account is a premium subscription service that gives you unlimited access to the entire Bibliocad library, including more than 50,000 Vip files that are not available for free users. These Vip files are high-quality CAD files that have been carefully selected and verified by the Bibliocad team. They are also updated regularly to ensure that they meet the latest standards and requirements of the industry.

    -

    What are the benefits of a Vip account?

    -

    With a Vip account, you can download as many files as you want without any restrictions or limitations. You can also request custom files from the Bibliocad team if you can't find what you need in the library. Moreover, you can enjoy faster download speeds and better file formats than free users.

    -

    But that's not all. A Vip account also gives you the opportunity to upload and share your own CAD files with the Bibliocad community. You can showcase your work, get feedback from other users, and earn points that you can redeem for prizes or discounts. You can also participate in contests and challenges that are organized by Bibliocad every month.

    -

    By becoming a Vip user, you also support the Bibliocad project and help it grow and improve. You contribute to the maintenance and development of the website and its services. You also help other users who rely on Bibliocad for their projects.

    -

    How much does a Vip account cost?

    -

    You can choose from three options:

    -
      -
    • Monthly plan: This plan costs $14.99 per month and gives you access to all the Vip features for one month. You can cancel anytime before the next billing cycle.
    • -
    • Annual plan: This plan costs $99 per year and gives you access to all the Vip features for one year. You save 45% compared to the monthly plan. You can cancel anytime before the next billing cycle.
    • -
    • Lifetime plan: This plan costs $199 one-time payment and gives you access to all the Vip features for life. You save 87% compared to the monthly plan. This is the best value option if you plan to use Bibliocad for a long time.
    • -
    -

    How do I get a Vip account?

    -

    To get a Vip account, you just need to visit this page and choose the plan that suits your needs and budget. You can pay with any major credit card or PayPal. You will receive an email confirmation with your login details and instructions on how to activate your account. You can start using your Vip account immediately after payment.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Greys Anatomy Season 3 Download Kickass Tips and Tricks for a Smooth Download.md b/spaces/raedeXanto/academic-chatgpt-beta/Greys Anatomy Season 3 Download Kickass Tips and Tricks for a Smooth Download.md deleted file mode 100644 index 449c9fa78a748e4f1d1df3143f31df266ccb5b20..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Greys Anatomy Season 3 Download Kickass Tips and Tricks for a Smooth Download.md +++ /dev/null @@ -1,104 +0,0 @@ - -

    Grey's Anatomy Season 3 Download Kickass: How to Watch the Medical Drama Online

    -

    If you are a fan of medical dramas, you have probably heard of Grey's Anatomy. This show has been running for 18 seasons and counting, and it has won numerous awards and accolades. But if you want to watch one of the most acclaimed and emotional seasons of the show, you might be wondering how to download Grey's Anatomy season 3 from Kickass. In this article, we will tell you everything you need to know about this season, its main characters, its best episodes, and how to watch it online.

    -

    Introduction

    -

    What is Grey's Anatomy?

    -

    Grey's Anatomy is a TV series created by Shonda Rhimes that follows the lives and careers of surgical interns, residents, and attendings at the fictional Seattle Grace Hospital. The show focuses on their personal and professional challenges, their relationships, their ethical dilemmas, and their medical cases. The show is named after Meredith Grey, one of the main characters and narrators of the series.

    -

    grey's anatomy season 3 download kickass


    Downloadhttps://tinourl.com/2uL0d4



    -

    Why is season 3 so popular?

    -

    Season 3 of Grey's Anatomy is widely considered as one of the best seasons of the show. It aired from September 2006 to May 2007 and it had 25 episodes. This season was full of drama, romance, humor, and tragedy. It featured some of the most memorable moments and storylines of the series, such as the ferry boat crash, the death of Denny Duquette, the wedding of Burke and Cristina, and the love triangle between Meredith, Derek, and Finn. It also introduced some new characters, such as Mark Sloan, Addison Montgomery's former lover and Derek's best friend; Callie Torres, an orthopedic surgeon who marries George O'Malley; and Erica Hahn, a cardiothoracic surgeon who clashes with Cristina.

    -

    How to download Grey's Anatomy season 3 from Kickass?

    -

    If you want to watch Grey's Anatomy season 3 online, you have several options. You can stream it on platforms like Netflix, Hulu, or Amazon Prime Video. You can also buy or rent it on iTunes, Google Play, or YouTube. But if you want to download it for free, you might want to use a torrent site like Kickass. Kickass is one of the most popular and reliable torrent sites that offers a wide range of movies and TV shows. To download Grey's Anatomy season 3 from Kickass, you need to follow these steps:

    -
      -
    1. Go to https://kickass.sx/ and search for "Grey's Anatomy season 3".
    2. -
    3. Choose a torrent that has a high number of seeders and leechers. This means that more people are sharing and downloading the file, which makes it faster and safer.
    4. -
    5. Click on the torrent link and download the .torrent file or copy the magnet link.
    6. -
    7. Open the .torrent file or paste the magnet link in a torrent client like uTorrent or BitTorrent.
    8. -
    9. Select the files you want to download and start the download process.
    10. -
    11. Wait until the download is complete and enjoy watching Grey's Anatomy season 3.
    12. -
    -

    Note: Downloading copyrighted content from torrent sites is illegal and risky. You might face legal consequences or get infected by malware. To protect yourself from these dangers, you should use a VPN service that hides your IP address and encrypts your traffic. A VPN also allows you to bypass geo-restrictions and access content that is not available in your region.

    -

    The Main Characters of Grey's Anatomy Season 3

    -

    Meredith Grey

    -

    Meredith Grey is a surgical intern who is in love with Derek Shepherd, a neurosurgeon who is married to Addison Montgomery, a neonatal surgeon. Meredith struggles with her feelings for Derek and her commitment issues. She also deals with her complicated family history, her friendship with Cristina Yang, and her rivalry with Ellis Grey, her mother who suffers from Alzheimer's disease.

    -

    Derek Shepherd

    -

    Derek Shepherd is a renowned neurosurgeon who is nicknamed "McDreamy" by the interns. He is torn between his wife Addison and his lover Meredith. He also faces some professional challenges when he operates on Richard Webber, the chief of surgery who has a brain tumor; when he loses his best friend Mark Sloan as a patient; and when he tries to save Meredith from drowning.

    -

    Cristina Yang

    -

    Cristina Yang is a competitive and ambitious surgical intern who specializes in cardiothoracic surgery. She is best friends with Meredith and has a romantic relationship with Preston Burke, an attending who mentors her. Cristina has a difficult pregnancy that ends in a miscarriage; she also suffers from post-traumatic stress disorder after being shot by a gunman in the hospital.

    -

    Izzie Stevens

    -

    Izzie Stevens is a compassionate and optimistic surgical intern who has a background as a model. She falls in love with Denny Duquette, a patient who needs a heart transplant. She goes to extreme lengths to save him by cutting his LVAD wire; however, he dies shortly after proposing to her. Izzie also has a brief affair with Alex Karev, another intern who breaks her heart.

    -

    Alex Karev

    -

    Alex Karev is a confident and arrogant surgical intern who has a talent for pediatric surgery. He has a troubled past that makes him insensitive and rude at times. He cheats on Izzie with Olivia Harper, a nurse who gives him syphilis; he also sleeps with Addison Montgomery as a rebound. He eventually develops feelings for Izzie but he pushes her away when he learns about her involvement with Denny.

    -


    -

    The Best Episodes of Grey's Anatomy Season 3

    -

    From a Whisper to a Scream

-

This episode aired in 2006. It revolves around the aftermath of a massive train crash that brings many victims to the hospital. The episode showcases the skills and emotions of the doctors as they deal with the crisis. Some of the highlights of this episode are:

- Meredith and Derek perform a risky surgery on a man who has a pole impaled through his chest and abdomen. They have to decide which one of the two patients attached to the pole will survive.
- Izzie treats a pregnant woman who has a broken arm and a history of drug abuse. She discovers that the woman is carrying the baby of her dead husband, who was killed in Iraq.
- Cristina and Burke operate on a boy who has a severe heart condition. They have to use a pig's heart valve as a temporary solution until they can find a donor. However, the boy's father refuses to consent to the surgery because of his religious beliefs.
- George and Alex work on a man who has a live grenade lodged in his body. They have to keep him calm and stable until the bomb squad arrives. George bonds with the man and tries to comfort him as he faces death.

    Six Days

    -

    This episode is a two-part episode that consists of the eleventh and twelfth episodes of season 3. It aired on January 11th and January 18th, 2007. It focuses on the personal and professional lives of the doctors as they cope with various challenges. Some of the highlights of this episode are: - George's father Harold O'Malley is diagnosed with esophageal cancer and has to undergo multiple surgeries. George has to deal with his family's drama and his own grief as he watches his father deteriorate. - Meredith and Derek try to rekindle their relationship after breaking up with Finn and Addison. However, they face some obstacles when Meredith's half-sister Molly shows up pregnant and Derek operates on her baby. - Izzie struggles with her guilt and depression after Denny's death. She isolates herself from her friends and spends her time baking muffins. She also receives a check for $8.7 million from Denny's estate, which she doesn't know what to do with. - Cristina and Burke plan their wedding, but they have different visions of what they want. Cristina wants a simple and low-key ceremony, while Burke wants a traditional and lavish one. They also have to deal with their parents' opinions and expectations.

    Drowning on Dry Land

    -

    This episode is the sixteenth episode of season 3 and it aired on February 15th, 2007. It is the second part of a three-part arc that involves a ferry boat accident that injures many people. The episode follows the doctors as they try to save the lives of the victims and themselves. Some of the highlights of this episode are: - Meredith falls into the water after trying to help a patient and drowns. She is rescued by Derek and taken to the hospital, where she is in a coma. She has an out-of-body experience where she meets Denny, Dylan, Bonnie, and Liz, who are all dead. - Alex finds Jane Doe, a woman who has severe facial injuries and no memory of who she is. He becomes attached to her and tries to help her recover. He also confesses his love for her. - Izzie treats a man who has a severe head injury and hallucinates that he is her fiancé. She plays along with his fantasy and pretends to be his bride-to-be. - Cristina operates on Tucker Jones, Bailey's husband who was in a car accident on his way to see their son Tuck. She has to perform a difficult procedure to stop his bleeding while Bailey watches.

    Some Kind of Miracle

    -

    This episode is the seventeenth episode of season 3 and it aired on February 22nd, 2007. It is the third and final part of the ferry boat arc that concludes Meredith's near-death experience. The episode shows how Meredith's condition affects everyone around her and how she fights for her life. Some of the highlights of this episode are: - Meredith meets Ellis Grey in her afterlife, who tells her that she is anything but ordinary and that she should not give up on living. Meredith decides to wake up from her coma and reunites with Derek. - Jane Doe regains her memory and reveals that her name is Rebecca Pope and that she has a husband and a son. Alex is heartbroken by this revelation and feels betrayed by her. - Izzie realizes that her patient is not her fiancé but a stranger who has a brain tumor that causes him to have false memories. She decides to donate Denny's money to the hospital to build a free clinic in his honor. - Cristina accepts Burke's proposal and agrees to marry him.

    Conclusion

    -

    Grey's Anatomy season 3 is one of the most captivating and emotional seasons of the show. It has everything you need for an entertaining and engaging watch: drama, romance, humor, tragedy, suspense, and more. If you want to relive this season or watch it for the first time, you can download it from Kickass using our guide above. But remember to use a VPN service for your safety and privacy.

    -

    FAQs

    -

    Here are some frequently asked questions about Grey's Anatomy season 3:

    -
      -
    1. How many awards did Grey's Anatomy season 3 win?
2. -

Among other honors, Grey's Anatomy season 3 won Outstanding Performance by a Female Actor in a Drama Series (Chandra Wilson) and two NAACP Image Awards for Outstanding Drama Series and Outstanding Supporting Actress in a Drama Series (Chandra Wilson).

      -
    3. Who dies in Grey's Anatomy season 3?
    4. -

      Several characters die in Grey's Anatomy season 3, including Denny Duquette, Meredith's half-sister's baby, Ellis Grey, Harold O'Malley, Dylan Young, Bonnie Crasnoff, and Liz Fallon.

      -
    5. Who leaves Grey's Anatomy after season 3?
    6. -

      Two main characters leave Grey's Anatomy after season 3: Preston Burke and Addison Montgomery. Burke leaves Cristina at the altar on their wedding day and moves away. Addison leaves Seattle Grace Hospital to start a new life in Los Angeles, where she joins a private practice.

      -
    7. What song plays at the end of Grey's Anatomy season 3 finale?
    8. -

      The song that plays at the end of Grey's Anatomy season 3 finale is "Keep Breathing" by Ingrid Michaelson. It is a poignant and hopeful song that reflects Meredith's recovery and the doctors' resilience.

      -
    9. Where can I watch Grey's Anatomy season 3 bloopers?
    10. -

      You can watch Grey's Anatomy season 3 bloopers on YouTube. Here is a link to one of the videos: https://www.youtube.com/watch?v=QzGy0xYx9wA.

      -
    -

    -
    -
    \ No newline at end of file diff --git a/spaces/rahul999r/Rahul_Kannada_TTS/src/hifi_gan/inference.py b/spaces/rahul999r/Rahul_Kannada_TTS/src/hifi_gan/inference.py deleted file mode 100644 index c70ee09b4110677b7cf9732d76a5e6ca93c8860c..0000000000000000000000000000000000000000 --- a/spaces/rahul999r/Rahul_Kannada_TTS/src/hifi_gan/inference.py +++ /dev/null @@ -1,98 +0,0 @@ -from __future__ import absolute_import, division, print_function, unicode_literals - -import glob -import os -import argparse -import json -import torch -from scipy.io.wavfile import write -from env import AttrDict -from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav -from models import Generator - -h = None -device = None - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print("Loading '{}'".format(filepath)) - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def get_mel(x): - return mel_spectrogram( - x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax - ) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + "*") - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return "" - return sorted(cp_list)[-1] - - -def inference(a): - generator = Generator(h).to(device) - - state_dict_g = load_checkpoint(a.checkpoint_file, device) - generator.load_state_dict(state_dict_g["generator"]) - - filelist = os.listdir(a.input_wavs_dir) - - os.makedirs(a.output_dir, exist_ok=True) - - generator.eval() - generator.remove_weight_norm() - with torch.no_grad(): - for i, filname in enumerate(filelist): - wav, sr = load_wav(os.path.join(a.input_wavs_dir, filname)) - wav = wav / MAX_WAV_VALUE - wav = torch.FloatTensor(wav).to(device) - x = get_mel(wav.unsqueeze(0)) - y_g_hat = generator(x) - audio = y_g_hat.squeeze() - audio = audio * MAX_WAV_VALUE - audio = audio.cpu().numpy().astype("int16") - - output_file = os.path.join( - a.output_dir, os.path.splitext(filname)[0] + "_generated.wav" - ) - write(output_file, h.sampling_rate, audio) - print(output_file) - - -def main(): - print("Initializing Inference Process..") - - parser = argparse.ArgumentParser() - parser.add_argument("--input_wavs_dir", default="test_files") - parser.add_argument("--output_dir", default="generated_files") - parser.add_argument("--checkpoint_file", required=True) - a = parser.parse_args() - - config_file = os.path.join(os.path.split(a.checkpoint_file)[0], "config.json") - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - torch.manual_seed(h.seed) - global device - if torch.cuda.is_available(): - torch.cuda.manual_seed(h.seed) - device = torch.device("cuda") - else: - device = torch.device("cpu") - - inference(a) - - -if __name__ == "__main__": - main() diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/body-parser/lib/types/json.js b/spaces/rayan-saleh/whisper2notion/server/node_modules/body-parser/lib/types/json.js deleted file mode 100644 index c2745be3a33fe16e30174e508c695b082852c8f7..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/body-parser/lib/types/json.js +++ /dev/null @@ -1,236 +0,0 @@ -/*! - * body-parser - * Copyright(c) 2014 Jonathan Ong - * Copyright(c) 2014-2015 Douglas Christopher Wilson - * MIT Licensed - */ - -'use strict' - -/** - * Module dependencies. 
- * @private - */ - -var bytes = require('bytes') -var contentType = require('content-type') -var createError = require('http-errors') -var debug = require('debug')('body-parser:json') -var read = require('../read') -var typeis = require('type-is') - -/** - * Module exports. - */ - -module.exports = json - -/** - * RegExp to match the first non-space in a string. - * - * Allowed whitespace is defined in RFC 7159: - * - * ws = *( - * %x20 / ; Space - * %x09 / ; Horizontal tab - * %x0A / ; Line feed or New line - * %x0D ) ; Carriage return - */ - -var FIRST_CHAR_REGEXP = /^[\x20\x09\x0a\x0d]*([^\x20\x09\x0a\x0d])/ // eslint-disable-line no-control-regex - -/** - * Create a middleware to parse JSON bodies. - * - * @param {object} [options] - * @return {function} - * @public - */ - -function json (options) { - var opts = options || {} - - var limit = typeof opts.limit !== 'number' - ? bytes.parse(opts.limit || '100kb') - : opts.limit - var inflate = opts.inflate !== false - var reviver = opts.reviver - var strict = opts.strict !== false - var type = opts.type || 'application/json' - var verify = opts.verify || false - - if (verify !== false && typeof verify !== 'function') { - throw new TypeError('option verify must be function') - } - - // create the appropriate type checking function - var shouldParse = typeof type !== 'function' - ? typeChecker(type) - : type - - function parse (body) { - if (body.length === 0) { - // special-case empty json body, as it's a common client-side mistake - // TODO: maybe make this configurable or part of "strict" option - return {} - } - - if (strict) { - var first = firstchar(body) - - if (first !== '{' && first !== '[') { - debug('strict violation') - throw createStrictSyntaxError(body, first) - } - } - - try { - debug('parse json') - return JSON.parse(body, reviver) - } catch (e) { - throw normalizeJsonSyntaxError(e, { - message: e.message, - stack: e.stack - }) - } - } - - return function jsonParser (req, res, next) { - if (req._body) { - debug('body already parsed') - next() - return - } - - req.body = req.body || {} - - // skip requests without bodies - if (!typeis.hasBody(req)) { - debug('skip empty body') - next() - return - } - - debug('content-type %j', req.headers['content-type']) - - // determine if request should be parsed - if (!shouldParse(req)) { - debug('skip parsing') - next() - return - } - - // assert charset per RFC 7159 sec 8.1 - var charset = getCharset(req) || 'utf-8' - if (charset.slice(0, 4) !== 'utf-') { - debug('invalid charset') - next(createError(415, 'unsupported charset "' + charset.toUpperCase() + '"', { - charset: charset, - type: 'charset.unsupported' - })) - return - } - - // read - read(req, res, next, parse, debug, { - encoding: charset, - inflate: inflate, - limit: limit, - verify: verify - }) - } -} - -/** - * Create strict violation syntax error matching native error. - * - * @param {string} str - * @param {string} char - * @return {Error} - * @private - */ - -function createStrictSyntaxError (str, char) { - var index = str.indexOf(char) - var partial = index !== -1 - ? str.substring(0, index) + '#' - : '' - - try { - JSON.parse(partial); /* istanbul ignore next */ throw new SyntaxError('strict violation') - } catch (e) { - return normalizeJsonSyntaxError(e, { - message: e.message.replace('#', char), - stack: e.stack - }) - } -} - -/** - * Get the first non-whitespace character in a string. 
- * - * @param {string} str - * @return {function} - * @private - */ - -function firstchar (str) { - var match = FIRST_CHAR_REGEXP.exec(str) - - return match - ? match[1] - : undefined -} - -/** - * Get the charset of a request. - * - * @param {object} req - * @api private - */ - -function getCharset (req) { - try { - return (contentType.parse(req).parameters.charset || '').toLowerCase() - } catch (e) { - return undefined - } -} - -/** - * Normalize a SyntaxError for JSON.parse. - * - * @param {SyntaxError} error - * @param {object} obj - * @return {SyntaxError} - */ - -function normalizeJsonSyntaxError (error, obj) { - var keys = Object.getOwnPropertyNames(error) - - for (var i = 0; i < keys.length; i++) { - var key = keys[i] - if (key !== 'stack' && key !== 'message') { - delete error[key] - } - } - - // replace stack before message for Node.js 0.10 and below - error.stack = obj.stack.replace(error.message, obj.message) - error.message = obj.message - - return error -} - -/** - * Get the simple type checker. - * - * @param {string} type - * @return {function} - */ - -function typeChecker (type) { - return function checkType (req) { - return Boolean(typeis(req, type)) - } -} diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/braces/lib/compile.js b/spaces/rayan-saleh/whisper2notion/server/node_modules/braces/lib/compile.js deleted file mode 100644 index 3e984a4bbc29983535c0be07700941373d817d94..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/braces/lib/compile.js +++ /dev/null @@ -1,57 +0,0 @@ -'use strict'; - -const fill = require('fill-range'); -const utils = require('./utils'); - -const compile = (ast, options = {}) => { - let walk = (node, parent = {}) => { - let invalidBlock = utils.isInvalidBrace(parent); - let invalidNode = node.invalid === true && options.escapeInvalid === true; - let invalid = invalidBlock === true || invalidNode === true; - let prefix = options.escapeInvalid === true ? '\\' : ''; - let output = ''; - - if (node.isOpen === true) { - return prefix + node.value; - } - if (node.isClose === true) { - return prefix + node.value; - } - - if (node.type === 'open') { - return invalid ? (prefix + node.value) : '('; - } - - if (node.type === 'close') { - return invalid ? (prefix + node.value) : ')'; - } - - if (node.type === 'comma') { - return node.prev.type === 'comma' ? '' : (invalid ? node.value : '|'); - } - - if (node.value) { - return node.value; - } - - if (node.nodes && node.ranges > 0) { - let args = utils.reduce(node.nodes); - let range = fill(...args, { ...options, wrap: false, toRegex: true }); - - if (range.length !== 0) { - return args.length > 1 && range.length > 1 ? `(${range})` : range; - } - } - - if (node.nodes) { - for (let child of node.nodes) { - output += walk(child, node); - } - } - return output; - }; - - return walk(ast); -}; - -module.exports = compile; diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Call.of.duty.ghosts.english.language.packl LINK.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Call.of.duty.ghosts.english.language.packl LINK.md deleted file mode 100644 index 64d8bc7d87e4233f68185c3b96c8e249c35e72cc..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Call.of.duty.ghosts.english.language.packl LINK.md +++ /dev/null @@ -1,94 +0,0 @@ -

    Call.of.duty.ghosts.english.language.packl


    Download ……… https://urlgoal.com/2uCMNZ



    - -Languages: 0Available versions:Total downloads: 26,287Last month: 29 - -Games like Fruit Ninja, Angry Birds, Temple Run have made the touch screen a popular platform for gaming. That is why, we bring you this list of games for Android. Have a look and download these games for your Android phone. - -If you love the classic games of the 1980s and 1990s like Pacman and Space Invaders, you will enjoy these games for Android. The graphics have been taken from the originals but they are being played on your phone screen. - -File name - -File size - -Saved size - -Time for Android - -Pac-Man Virtual Edition - -1.51 MB - -3.11 MB - -2 days - -Drop.it - -7.03 MB - -9.28 MB - -1 day - -Fruit Ninja - -2.95 MB - -2.25 MB - -Curious George - -5.06 MB - -5.29 MB - -Enchants of Istarsia: The Chaos Seeds - -4.12 MB - -4.27 MB - -Get ready to play with any of the following games that we have for you and your phone. - -Happy gaming! - -10.02.2011 - - -20.12.2015 - -Screenshots - -Comments - -Hello, is there anyone who knows how to create a remote app like teamviewer or any virtual machine for android? i want to install a linux distro that let me do really what i want, without the lags of android, i want to install it on my phone and i want to remote control it! is there anyone who can help me, or help me? - -09.08.2015 - - -10.11.2015 - -Terse - -01.03.2016 - - -01.03.2016 - -this is my last few questions to anyone. - -Kyrix - -02.03.2016 - - -02.03.2016 - -Budji - -03.03.2016 - - -03.03.2016 - -Horo follo - -Jayzee 4fefd39f24
    -
    -
    -

    diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cd-key Gamehouse Games Collection Crack ((NEW)).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cd-key Gamehouse Games Collection Crack ((NEW)).md deleted file mode 100644 index ba629cac34388f8d50c6f57fd19d58351b94f0bb..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cd-key Gamehouse Games Collection Crack ((NEW)).md +++ /dev/null @@ -1,6 +0,0 @@ -

    cd-key gamehouse games collection crack


    Download Zip ———>>> https://urlgoal.com/2uCLEd



    -
    -serial keys registration codes for software ... registration key generator oberon games ... gamehouse fishing craze registration key ... registration key collection 1fdad05405
    -
    -
    -

    diff --git a/spaces/renumics/commonlit-student-summaries/run.py b/spaces/renumics/commonlit-student-summaries/run.py deleted file mode 100644 index bbc44c5275b39d69d19c2774b6856a6cc8253c60..0000000000000000000000000000000000000000 --- a/spaces/renumics/commonlit-student-summaries/run.py +++ /dev/null @@ -1,22 +0,0 @@ -import pandas as pd -from renumics import spotlight - -if __name__ == "__main__": - df = pd.read_csv("dataset.csv") - while True: - dtypes = { - "text_len": float, - "text_embedding": spotlight.Embedding, - "text_embedding_reduced": spotlight.Embedding, - } - - view = spotlight.show( - df, - dtype=dtypes, - layout="spotlight-layout.json", - port=7860, - host="0.0.0.0", - allow_filebrowsing=False, - ) - - view.close() diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/datasets/pipelines/instaboost.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/datasets/pipelines/instaboost.py deleted file mode 100644 index ca10c4c751f5309e37822fbe61ea3c7ed5de1b83..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/datasets/pipelines/instaboost.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class InstaBoost: - r"""Data augmentation method in `InstaBoost: Boosting Instance - Segmentation Via Probability Map Guided Copy-Pasting - `_. - - Refer to https://github.com/GothicAi/Instaboost for implementation details. - - Args: - action_candidate (tuple): Action candidates. "normal", "horizontal", \ - "vertical", "skip" are supported. Default: ('normal', \ - 'horizontal', 'skip'). - action_prob (tuple): Corresponding action probabilities. Should be \ - the same length as action_candidate. Default: (1, 0, 0). - scale (tuple): (min scale, max scale). Default: (0.8, 1.2). - dx (int): The maximum x-axis shift will be (instance width) / dx. - Default 15. - dy (int): The maximum y-axis shift will be (instance height) / dy. - Default 15. - theta (tuple): (min rotation degree, max rotation degree). \ - Default: (-1, 1). - color_prob (float): Probability of images for color augmentation. - Default 0.5. - heatmap_flag (bool): Whether to use heatmap guided. Default False. - aug_ratio (float): Probability of applying this transformation. \ - Default 0.5. 
- """ - - def __init__(self, - action_candidate=('normal', 'horizontal', 'skip'), - action_prob=(1, 0, 0), - scale=(0.8, 1.2), - dx=15, - dy=15, - theta=(-1, 1), - color_prob=0.5, - hflag=False, - aug_ratio=0.5): - try: - import instaboostfast as instaboost - except ImportError: - raise ImportError( - 'Please run "pip install instaboostfast" ' - 'to install instaboostfast first for instaboost augmentation.') - self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, - scale, dx, dy, theta, - color_prob, hflag) - self.aug_ratio = aug_ratio - - def _load_anns(self, results): - labels = results['ann_info']['labels'] - masks = results['ann_info']['masks'] - bboxes = results['ann_info']['bboxes'] - n = len(labels) - - anns = [] - for i in range(n): - label = labels[i] - bbox = bboxes[i] - mask = masks[i] - x1, y1, x2, y2 = bbox - # assert (x2 - x1) >= 1 and (y2 - y1) >= 1 - bbox = [x1, y1, x2 - x1, y2 - y1] - anns.append({ - 'category_id': label, - 'segmentation': mask, - 'bbox': bbox - }) - - return anns - - def _parse_anns(self, results, anns, img): - gt_bboxes = [] - gt_labels = [] - gt_masks_ann = [] - for ann in anns: - x1, y1, w, h = ann['bbox'] - # TODO: more essential bug need to be fixed in instaboost - if w <= 0 or h <= 0: - continue - bbox = [x1, y1, x1 + w, y1 + h] - gt_bboxes.append(bbox) - gt_labels.append(ann['category_id']) - gt_masks_ann.append(ann['segmentation']) - gt_bboxes = np.array(gt_bboxes, dtype=np.float32) - gt_labels = np.array(gt_labels, dtype=np.int64) - results['ann_info']['labels'] = gt_labels - results['ann_info']['bboxes'] = gt_bboxes - results['ann_info']['masks'] = gt_masks_ann - results['img'] = img - return results - - def __call__(self, results): - img = results['img'] - ori_type = img.dtype - anns = self._load_anns(results) - if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]): - try: - import instaboostfast as instaboost - except ImportError: - raise ImportError('Please run "pip install instaboostfast" ' - 'to install instaboostfast first.') - anns, img = instaboost.get_new_data( - anns, img.astype(np.uint8), self.cfg, background=None) - - results = self._parse_anns(results, anns, img.astype(ori_type)) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})' - return repr_str diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/utils/compat_config.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/utils/compat_config.py deleted file mode 100644 index 05aa37dcd6f74dd1884069e90edf39684c897798..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/utils/compat_config.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import warnings - -from mmcv import ConfigDict - - -def compat_cfg(cfg): - """This function would modify some filed to keep the compatibility of - config. - - For example, it will move some args which will be deprecated to the correct - fields. 
- """ - cfg = copy.deepcopy(cfg) - cfg = compat_imgs_per_gpu(cfg) - cfg = compat_loader_args(cfg) - cfg = compat_runner_args(cfg) - return cfg - - -def compat_runner_args(cfg): - if 'runner' not in cfg: - cfg.runner = ConfigDict({ - 'type': 'EpochBasedRunner', - 'max_epochs': cfg.total_epochs - }) - warnings.warn( - 'config is now expected to have a `runner` section, ' - 'please set `runner` in your config.', UserWarning) - else: - if 'total_epochs' in cfg: - assert cfg.total_epochs == cfg.runner.max_epochs - return cfg - - -def compat_imgs_per_gpu(cfg): - cfg = copy.deepcopy(cfg) - if 'imgs_per_gpu' in cfg.data: - warnings.warn('"imgs_per_gpu" is deprecated in MMDet V2.0. ' - 'Please use "samples_per_gpu" instead') - if 'samples_per_gpu' in cfg.data: - warnings.warn( - f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and ' - f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"' - f'={cfg.data.imgs_per_gpu} is used in this experiments') - else: - warnings.warn('Automatically set "samples_per_gpu"="imgs_per_gpu"=' - f'{cfg.data.imgs_per_gpu} in this experiments') - cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu - return cfg - - -def compat_loader_args(cfg): - """Deprecated sample_per_gpu in cfg.data.""" - - cfg = copy.deepcopy(cfg) - if 'train_dataloader' not in cfg.data: - cfg.data['train_dataloader'] = ConfigDict() - if 'val_dataloader' not in cfg.data: - cfg.data['val_dataloader'] = ConfigDict() - if 'test_dataloader' not in cfg.data: - cfg.data['test_dataloader'] = ConfigDict() - - # special process for train_dataloader - if 'samples_per_gpu' in cfg.data: - - samples_per_gpu = cfg.data.pop('samples_per_gpu') - assert 'samples_per_gpu' not in \ - cfg.data.train_dataloader, ('`samples_per_gpu` are set ' - 'in `data` field and ` ' - 'data.train_dataloader` ' - 'at the same time. ' - 'Please only set it in ' - '`data.train_dataloader`. ') - cfg.data.train_dataloader['samples_per_gpu'] = samples_per_gpu - - if 'persistent_workers' in cfg.data: - - persistent_workers = cfg.data.pop('persistent_workers') - assert 'persistent_workers' not in \ - cfg.data.train_dataloader, ('`persistent_workers` are set ' - 'in `data` field and ` ' - 'data.train_dataloader` ' - 'at the same time. ' - 'Please only set it in ' - '`data.train_dataloader`. ') - cfg.data.train_dataloader['persistent_workers'] = persistent_workers - - if 'workers_per_gpu' in cfg.data: - - workers_per_gpu = cfg.data.pop('workers_per_gpu') - cfg.data.train_dataloader['workers_per_gpu'] = workers_per_gpu - cfg.data.val_dataloader['workers_per_gpu'] = workers_per_gpu - cfg.data.test_dataloader['workers_per_gpu'] = workers_per_gpu - - # special process for val_dataloader - if 'samples_per_gpu' in cfg.data.val: - # keep default value of `sample_per_gpu` is 1 - assert 'samples_per_gpu' not in \ - cfg.data.val_dataloader, ('`samples_per_gpu` are set ' - 'in `data.val` field and ` ' - 'data.val_dataloader` at ' - 'the same time. ' - 'Please only set it in ' - '`data.val_dataloader`. ') - cfg.data.val_dataloader['samples_per_gpu'] = \ - cfg.data.val.pop('samples_per_gpu') - # special process for val_dataloader - - # in case the test dataset is concatenated - if isinstance(cfg.data.test, dict): - if 'samples_per_gpu' in cfg.data.test: - assert 'samples_per_gpu' not in \ - cfg.data.test_dataloader, ('`samples_per_gpu` are set ' - 'in `data.test` field and ` ' - 'data.test_dataloader` ' - 'at the same time. ' - 'Please only set it in ' - '`data.test_dataloader`. 
') - - cfg.data.test_dataloader['samples_per_gpu'] = \ - cfg.data.test.pop('samples_per_gpu') - - elif isinstance(cfg.data.test, list): - for ds_cfg in cfg.data.test: - if 'samples_per_gpu' in ds_cfg: - assert 'samples_per_gpu' not in \ - cfg.data.test_dataloader, ('`samples_per_gpu` are set ' - 'in `data.test` field and ` ' - 'data.test_dataloader` at' - ' the same time. ' - 'Please only set it in ' - '`data.test_dataloader`. ') - samples_per_gpu = max( - [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) - cfg.data.test_dataloader['samples_per_gpu'] = samples_per_gpu - - return cfg diff --git a/spaces/rorallitri/biomedical-language-models/logs/(PDF) Passives in Modern Tamil - ResearchGate[2].md b/spaces/rorallitri/biomedical-language-models/logs/(PDF) Passives in Modern Tamil - ResearchGate[2].md deleted file mode 100644 index e124da24a07f7f3c4e78820e171de92706ae1e14..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/(PDF) Passives in Modern Tamil - ResearchGate[2].md +++ /dev/null @@ -1,24 +0,0 @@ - -

Passive voice is used in writing when you want to emphasize the object of a sentence, while active voice is used when you want to emphasize the subject. Knowing the difference between these two voices is vital for students and writers. To teach active and passive voice, have learners identify the subject and verb in a sentence, explain the difference between the two voices, and practice rearranging sentences from passive to active while keeping the same verb tense.
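A quick way to make those three teaching steps concrete is to walk through a single sentence programmatically. The sketch below is only a toy illustration: it assumes the clause has already been split by hand into subject, verb and agent, and the `past_tense_of` lookup covers just this one verb.

```python
# Toy illustration of the three teaching steps on one example sentence.
# Assumes the passive clause has already been segmented by hand.
passive = {"subject": "the mouse", "verb": "was eaten", "agent": "the cat"}

# Step 1: identify the subject and the verb.
print("subject:", passive["subject"], "| verb:", passive["verb"])

# Step 2: note the difference -- in the passive, the subject undergoes the action.
# Step 3: rearrange into the active voice, keeping the same (past) tense.
past_tense_of = {"was eaten": "ate"}  # hand-written lookup for this example only
active = f"{passive['agent'].capitalize()} {past_tense_of[passive['verb']]} {passive['subject']}."
print(active)  # -> "The cat ate the mouse."
```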

    -

    In grammar, the voice of a verb describes the relationship between the action (or state) that the verb expresses and the participants identified by its arguments (subject, object, etc.). When the subject is the agent or doer of the action, the verb is in the active voice. When the subject is the patient, target or undergoer of the action, the verb is said to be in the passive voice.[1][2][3] When the subject both performs and receives the action expressed by the verb, the verb is in the middle voice. Voice is sometimes called diathesis.[4]

    -

    -

    The following pair of examples illustrates the contrast between active and passive voice in English. In sentence (1), the verb form ate is in the active voice, but in sentence (2), the verb form was eaten is in the passive voice. Independent of voice, the cat is the Agent (the doer) of the action of eating in both sentences.

    -

    In a transformation from an active-voice clause to an equivalent passive-voice construction, the subject and the direct object switch grammatical roles. The direct object gets promoted to subject, and the subject demoted to an (optional) adjunct. In the first example above, the mouse serves as the direct object in the active-voice version, but becomes the subject in the passive version. The subject of the active-voice version, the cat, becomes part of a prepositional phrase in the passive version of the sentence, and can be left out entirely; The mouse was eaten.
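The promotion and demotion described here can be sketched as a small function. This is a hypothetical toy, not a grammar engine: it assumes the clause is already segmented into subject, verb and object, and it takes the passive verb group (e.g. "was eaten") as given rather than deriving it.

```python
def passivize(subject, passive_verb_group, direct_object, keep_agent=True):
    """Promote the direct object to subject; demote the subject to an optional by-phrase."""
    clause = f"{direct_object.capitalize()} {passive_verb_group}"
    if keep_agent:
        clause += f" by {subject}"   # the demoted subject survives as an adjunct
    return clause + "."

print(passivize("the cat", "was eaten", "the mouse"))         # The mouse was eaten by the cat.
print(passivize("the cat", "was eaten", "the mouse", False))  # The mouse was eaten.
```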

    -

    The active voice is the most commonly used in many languages and represents the "normal" case, in which the subject of the verb is the agent. In the active voice, the subject of the sentence performs the action or causes the happening denoted by the verb. Sentence (1) is in active voice, as indicated by the verb form saw.

    -

    The passive voice is employed in a clause whose subject expresses the theme or patient of the verb. That is, it undergoes an action or has its state changed.[7] In the passive voice, the grammatical subject of the verb is the recipient (not the doer) of the action denoted by the verb. In English it serves a variety of functions including focusing on the object, demoting the subject and handling situations where the speaker either wants to suppress information about who the doer of the action is, or in reality does not know their identity, or when the doer is either unimportant or likely to be common knowledge. There are syntactic, semantic, and pragmatic motivations for choosing the passive voice instead of the active.[8] Some languages, such as English and Spanish, use a periphrastic passive voice; that is, it is not a single word form, but rather a construction making use of other word forms. Specifically, it is made up of a form of the auxiliary verb to be and a past participle of the main verb which carries the lexical content of the predicate. In other languages, such as Latin, the passive voice for some tenses is simply marked on the verb by inflection: librum legit "He reads the book"; liber legitur "The book is read".
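As a sketch of the periphrastic pattern, the passive verb group is simply a tensed form of *be* plus the past participle. The `PARTICIPLE` and `BE` lookups below are hand-written assumptions covering a few verbs, and the Latin pair in the comment just repeats the example from the text.

```python
# English builds the passive analytically: BE (carrying tense) + past participle.
# Latin instead inflects the verb itself: legit "he reads" -> legitur "it is read".
PARTICIPLE = {"eat": "eaten", "read": "read", "see": "seen"}   # assumed mini-lexicon
BE = {"present": "is", "past": "was"}

def periphrastic_passive(verb, tense):
    return f"{BE[tense]} {PARTICIPLE[verb]}"

print(periphrastic_passive("read", "present"))  # "is read"  (cf. Latin legitur)
print(periphrastic_passive("eat", "past"))      # "was eaten"
```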

    -

    Some languages (such as Albanian, Bengali, Fula, Tamil, Sanskrit, Icelandic, Swedish and Ancient Greek) have a middle voice, which is a set of inflections or constructions which is to some extent different from both the active and passive voices.

    -

    The subject of such middle voice is like the subject of active voice as well as the subject of passive voice, in that it performs an action, and is also affected by that action.[8] Another difference between middle voice and the other two grammatical voices is that there are middle marked verbs for which no corresponding active verb form exists.[9] In some cases, the middle voice is any grammatical option where the subject of a material process cannot be categorized as either an actor (someone doing something) or a goal (that at which the actor aims their work). For example, while the passive voice expresses a medium (goal) being affected by an external agent (actor) as in sentence (4), the middle voice expresses a medium undergoing change without any external agent as in sentence (5). In English, though the inflection for middle voice and active voice are the same for these cases, they differ in whether or not they permit the expression of the Agent argument in an oblique by-phrase PP: thus while the by-phrase is possible with passive voice as in sentence (6), it is not possible with middle voice, as shown by the ill-formed sentence (7).

    -

    -

    In Classical Greek, the middle voice is often used for material processes where the subject is both the actor (the one doing the action) and the medium (that which is undergoing change) as in "the man got a shave", opposing both active and passive voices where the medium is the goal as in "The barber shaved the man" and "The man got shaved by the barber". Finally, it can occasionally be used in a causative sense, such as "The father causes his son to be set free", or "The father ransoms his son".

    -

    Topic-prominent languages like Mandarin tend not to employ the passive voice as frequently. In general, Mandarin used to be best analyzed using middle voice, but Mandarin-speakers can construct a passive voice by using the coverb 被 (bèi) and rearranging the usual word order.[17] For example, this sentence using active voice:
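The gloss tables that originally followed this sentence did not survive extraction. The sketch below therefore uses the standard dog-bites-man illustration as an assumed stand-in, chosen because the next paragraph refers to emphasis on "the dog"; it only demonstrates the word-order rearrangement around 被.

```python
# Assumed stand-in for the lost example sentences (dog-bites-man illustration).
active  = ["狗", "咬了", "这个男人"]               # SVO: "A dog bit this man."
# bei-passive: the patient moves to the front, 被 introduces the demoted agent.
passive = [active[2], "被", active[0], active[1]]  # "这个男人 被 狗 咬了"
print("".join(passive))  # 这个男人被狗咬了 -- "This man was bitten by a dog."
```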

    -

In addition, the auxiliary verb "to be" 是 (shì) is frequently added to the passive construction to emphasize the identity of the actor. This example places emphasis on the dog, presumably as opposed to some other animal:

    -

More recently, syntacticians have investigated the passive voice in Mandarin and found that it depends heavily on the context of the sentence rather than on grammatical form.[18] The passive can therefore be marked (e.g. by the most widely used passive marker, bei 被, mentioned above) or unmarked (see the "Notional Passive" section below) in both speech and writing. Sentences that carry a passive marker are called long passives, while those that do not require one are called short passives.[19]

    -

No formal passive marker is present; instead, the passive reading is introduced by a verb that marks the subject as the receiver of the action and is followed by an object. The literal meaning is quite similar to that of English inverted sentences, and the tone is usually formal. Common indicators are a set of verbs such as dedao 得到, shoudao 受到 and zaodao 遭到, the three verbs most commonly used in the lexical passive.

    -

However, Li et al. (1981), arguing against Chao's analysis of Mandarin, stated that there is a distinct class of middle-voice verbs, while recognizing that Mandarin (and Cantonese) verbs as a whole behave in the same way. Later in the same work, they introduced middle-voice sentences as examples of topic/comment constructions that lack an overt subject.[22]

    -

Ting (2006) compared middles with Ba constructions (= active voice) involving intransitive V-de (得) resultatives, and also compared middles with inchoatives. He argues that notional passives in Mandarin can be treated as middle constructions: their underlying grammatical subject position and the lack of a syntactically active logical subject are best explained by a presyntactic approach, while semantically the Chinese middle voice may be interpreted like stative or verbal passives.[23]

    -

    Ting argues that sentence a) is ungrammatical and indistinguishable from ergatives, and that sentence b) is grammatical and he believes that it must have used middle voice due to their function of defocusing an agent subject. Although Bei construction in passive voice can achieve the same purpose, there is a possibility that associating with Bei construction may be inappropriate in many contexts. Thus, using middle voice is better in this case.

    -

In the actor-emphasizing passive voice of Cantonese, besides the addition of the auxiliary verb "to be" 係 (hai6), the perfective event is also converted to an adjective-like predicative with the suffix 嘅 (ge3) or 㗎 (gaa3), the latter being a more emphatic form that arises from the fusion of 嘅 (ge3) and 啊 (aa3):

    -

Although a topic-prominent language, Japanese employs the passive voice quite frequently and has two types of passive: the direct passive, which corresponds to the English passive, and an indirect passive, which has no English counterpart. The passive voice in Japanese is constructed with the verb stem followed by the passive morpheme -(r)are. This synthetic passive morpheme can attach to transitive, ditransitive and some intransitive verbs.[26] Word order in Japanese is more flexible, so passive sentences can appear in both SOV (subject + object + verb) and OSV (object + subject + verb) order, although SOV is used more often.[25] Furthermore, there are two theories about the passive voice in Japanese, the uniform and the non-uniform theory, which debate whether direct and indirect passives should be treated alike or differently.
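A minimal sketch of the -(r)are pattern is shown below. It uses a hand-written lookup of a couple of verbs rather than real conjugation rules (which depend on verb class), so both the `PASSIVE_FORM` table and the `to_passive` helper are assumptions for illustration only.

```python
# Toy lookup of active -> passive verb forms built with -(r)are.
# Real conjugation depends on verb class; these pairs are just illustrations.
PASSIVE_FORM = {
    "taberu": "taberareru",  # 食べる -> 食べられる, "eat" -> "be eaten" (ichidan: stem + rareru)
    "yomu":   "yomareru",    # 読む   -> 読まれる,   "read" -> "be read"  (godan: -u -> -areru)
}

def to_passive(verb):
    return PASSIVE_FORM.get(verb, verb + " (conjugation not covered in this sketch)")

print(to_passive("taberu"))  # taberareru
print(to_passive("yomu"))    # yomareru
```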

    -

Indirect passives have two varieties, possessive passives and gapless passives. In possessive passives, the grammatical subject stands in a canonical possessive relation with the direct object; gapless passives appear to lack an active counterpart and contain an extra argument that is realized as the grammatical subject but is not licensed by the main verb. Indirect passives can also be used when something undesirable happens to the speaker.[24]

    -
    -
    \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Free Download Film Negeri 5 Menara Full Versioninstmankl How to Achieve Your Dreams with Man Jadda Wajada.md b/spaces/rorallitri/biomedical-language-models/logs/Free Download Film Negeri 5 Menara Full Versioninstmankl How to Achieve Your Dreams with Man Jadda Wajada.md deleted file mode 100644 index 560cad3e0fe91648b371ec364162318da163e2ed..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Free Download Film Negeri 5 Menara Full Versioninstmankl How to Achieve Your Dreams with Man Jadda Wajada.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Castlevania Lords Of Shadow 2 Revelations Download Pcl


    Download File >>>>> https://tinurll.com/2uznjb



    - - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/samcaicn/bingai/src/components/ui/textarea.tsx b/spaces/samcaicn/bingai/src/components/ui/textarea.tsx deleted file mode 100644 index e25af722c7a5dc1121a9ab58d6716952f9f76081..0000000000000000000000000000000000000000 --- a/spaces/samcaicn/bingai/src/components/ui/textarea.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import * as React from 'react' - -import { cn } from '@/lib/utils' - -export interface TextareaProps - extends React.TextareaHTMLAttributes {} - -const Textarea = React.forwardRef( - ({ className, ...props }, ref) => { - return ( -