parquet-converter commited on
Commit
a111184
·
1 Parent(s): e2713fe

Update parquet files (step 48 of 121)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/123Kumar/vits-uma-genshin-honkai123/Docker/Dockerfile +0 -12
  2. spaces/17TheWord/RealESRGAN/scripts/pytorch2onnx.py +0 -36
  3. spaces/1gistliPinn/ChatGPT4/Examples/Application Android Sur Geant Cx 88 Hd - HOT.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/CRACK Intuit QuickBooks Enterprise 18.0 R3 License Key FREE.md +0 -6
  5. spaces/1gistliPinn/ChatGPT4/Examples/Filmora9 [REPACK] Crack License Key Full [Latest].md +0 -6
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descarga el APK de WhatsApp Messenger y accede a chats llamadas y videollamadas cifradas.md +0 -139
  7. spaces/1phancelerku/anime-remove-background/Become a Football Legend with Vive le Football for Android.md +0 -110
  8. spaces/2ndelement/voicevox/voicevox_engine/engine_manifest/EngineManifestLoader.py +0 -46
  9. spaces/4Taps/SadTalker/modules/sadtalker_test.py +0 -118
  10. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/speed.py +0 -23
  11. spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537238KB.py +0 -123
  12. spaces/AB-TW/team-ai/documents/bussiness_context/business_context.md +0 -19
  13. spaces/AEUPH/SENTIENCE_PROGRAMMING_LANGUAGE/README.md +0 -11
  14. spaces/AI-Hobbyist/Hoyo-RVC/i18n/locale_diff.py +0 -45
  15. spaces/AIConsultant/MusicGen/docs/CONDITIONING.md +0 -146
  16. spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/__init__.py +0 -2
  17. spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/remove_optimizer.py +0 -18
  18. spaces/AIML-TUDA/safe-stable-diffusion/app.py +0 -349
  19. spaces/AONYLMR/White-box-Cartoonization/wbc/cartoonize.py +0 -112
  20. spaces/AONYLMR/anime-remove-background/app.py +0 -52
  21. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/components/LoadingModalWritable.js +0 -6
  22. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/midas/base_model.py +0 -16
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/drag/Drag.js +0 -2
  24. spaces/Ahmadjaved/Genaispeech/app.py +0 -164
  25. spaces/AlexWang/lama/bin/make_checkpoint.py +0 -79
  26. spaces/Alpaca233/SadTalker/src/face3d/util/__init__.py +0 -3
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/__init__.py +0 -26
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py +0 -449
  29. spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py +0 -4
  30. spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py +0 -36
  31. spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/stare.py +0 -59
  32. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py +0 -2
  33. spaces/Anew5128/Anew51/tts_edge.py +0 -34
  34. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/handlers/json_handler.py +0 -36
  35. spaces/Awesimo/jojogan/model.py +0 -688
  36. spaces/BAAI/AltDiffusion/app.py +0 -330
  37. spaces/Bart92/RVC_HF/utils/dependency.py +0 -170
  38. spaces/BartPoint/VoiceChange_Beta/infer_pack/modules/F0Predictor/DioF0Predictor.py +0 -90
  39. spaces/Benson/text-generation/Examples/Casa De Diseo Fijar Y Flip Mod Apk Pc.md +0 -101
  40. spaces/Benson/text-generation/Examples/Chicos Tropiezo Apk Obb Descargar.md +0 -68
  41. spaces/Benson/text-generation/Examples/Descargar Fuera De La Carretera Mod Apk Todos Los Coches Desbloqueados ltima Versin.md +0 -55
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/__init__.py +0 -127
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/network/__init__.py +0 -2
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatter.py +0 -94
  45. spaces/Branon/TurboKeys/Dockerfile +0 -11
  46. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/configs/Detectron1-Comparisons/README.md +0 -82
  47. spaces/CVPR/LIVE/pybind11/tests/test_buffers.py +0 -109
  48. spaces/CVPR/LIVE/thrust/thrust/replace.h +0 -823
  49. spaces/CVPR/LIVE/thrust/thrust/system/tbb/memory.h +0 -99
  50. spaces/CVPR/WALT/mmdet/models/detectors/mask_scoring_rcnn.py +0 -27
spaces/123Kumar/vits-uma-genshin-honkai123/Docker/Dockerfile DELETED
@@ -1,12 +0,0 @@
1
- FROM python:3.9-bullseye
2
- VOLUME ["/app"]
3
- WORKDIR /app
4
- # Set apt to Chinese mirror
5
- RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
6
- RUN apt-get update && apt-get -y install cmake git
7
- RUN git clone https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai
8
- WORKDIR /app/vits-uma-genshin-honkai
9
- RUN sed -i "s/\.launch()/\.launch(server_name=\"0.0.0.0\")/" /app/vits-uma-genshin-honkai/app.py
10
- ADD vits.sh /app/vits.sh
11
- EXPOSE 7860
12
- ENTRYPOINT [ "/app/vits.sh" ]
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/17TheWord/RealESRGAN/scripts/pytorch2onnx.py DELETED
@@ -1,36 +0,0 @@
1
- import argparse
2
- import torch
3
- import torch.onnx
4
- from basicsr.archs.rrdbnet_arch import RRDBNet
5
-
6
-
7
- def main(args):
8
- # An instance of the model
9
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
10
- if args.params:
11
- keyname = 'params'
12
- else:
13
- keyname = 'params_ema'
14
- model.load_state_dict(torch.load(args.input)[keyname])
15
- # set the train mode to false since we will only run the forward pass.
16
- model.train(False)
17
- model.cpu().eval()
18
-
19
- # An example input
20
- x = torch.rand(1, 3, 64, 64)
21
- # Export the model
22
- with torch.no_grad():
23
- torch_out = torch.onnx._export(model, x, args.output, opset_version=11, export_params=True)
24
- print(torch_out.shape)
25
-
26
-
27
- if __name__ == '__main__':
28
- """Convert pytorch model to onnx models"""
29
- parser = argparse.ArgumentParser()
30
- parser.add_argument(
31
- '--input', type=str, default='experiments/pretrained_models/RealESRGAN_x4plus.pth', help='Input model path')
32
- parser.add_argument('--output', type=str, default='realesrgan-x4.onnx', help='Output onnx path')
33
- parser.add_argument('--params', action='store_false', help='Use params instead of params_ema')
34
- args = parser.parse_args()
35
-
36
- main(args)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Application Android Sur Geant Cx 88 Hd - HOT.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Application Android Sur Geant Cx 88 Hd -</h2><br /><p><b><b>Download</b> &middot;&middot;&middot; <a href="https://imgfil.com/2uy0F2">https://imgfil.com/2uy0F2</a></b></p><br /><br />
2
- <br />
3
- Applications Android Pour Geant 88 Hd New DOWNLOAD. ... update Geant GN- CX 8 Flash Samsat HD70 To HD80 GALAXY *W3 ON 16/06/ Upgrade By USB. 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/CRACK Intuit QuickBooks Enterprise 18.0 R3 License Key FREE.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>CRACK Intuit QuickBooks Enterprise 18.0 R3 License Key</h2><br /><p><b><b>Download</b> &#10004;&#10004;&#10004; <a href="https://imgfil.com/2uxXTt">https://imgfil.com/2uxXTt</a></b></p><br /><br />
2
- <br />
3
- Intuit QuickBooks Enterprise Accountant 2016 16.0 R3 Final - Small business ... idm 6.23QuickBooks Pro 2018 Serial Key With Crack Download. 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Filmora9 [REPACK] Crack License Key Full [Latest].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Filmora9 Crack License Key Full [Latest]</h2><br /><p><b><b>DOWNLOAD</b> &#127379; <a href="https://imgfil.com/2uy0RU">https://imgfil.com/2uy0RU</a></b></p><br /><br />
2
-
3
- ... Full download of Wondershare Filmora 9, free activation of Wondershare Filmora 9, free serial key Wondershare Filmora 9 with licensed email registration code .. . ## What's new in Filmora 9 Final? 8a78ff9644<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descarga el APK de WhatsApp Messenger y accede a chats llamadas y videollamadas cifradas.md DELETED
@@ -1,139 +0,0 @@
1
-
2
- <h1>Descargar WhatsApp Messenger APK: Cómo y por qué hacerlo</h1>
3
- <p>WhatsApp Messenger es una de las aplicaciones de mensajería más populares y utilizadas en todo el mundo. Con más de 2000 millones de usuarios activos, WhatsApp te permite enviar mensajes de texto, voz, imágenes, vídeos, documentos y otros tipos de archivos a tus contactos, así como realizar llamadas y videollamadas gratuitas a través de internet. Además, WhatsApp cuenta con un cifrado de extremo a extremo que protege la privacidad y seguridad de tus conversaciones.</p>
4
- <h2>descargar whatsapp messenger apk</h2><br /><p><b><b>Download</b> &ndash;&ndash;&ndash; <a href="https://urlin.us/2uSXm7">https://urlin.us/2uSXm7</a></b></p><br /><br />
5
- <p>Pero ¿qué pasa si quieres descargar el APK de WhatsApp Messenger? ¿Qué ventajas tiene hacerlo? ¿Qué riesgos implica? ¿Qué alternativas hay a WhatsApp? En este artículo te explicamos todo lo que necesitas saber sobre cómo y por qué descargar el APK de WhatsApp Messenger.</p>
6
- <h2>Qué es WhatsApp Messenger y qué ventajas tiene</h2>
7
- <p>WhatsApp Messenger es una aplicación de mensajería instantánea que funciona con la tecnología Voice over IP (VoIP), es decir, que utiliza la conexión a internet para enviar y recibir mensajes y llamadas. WhatsApp es propiedad de Facebook, Inc., que la adquirió en 2014 por 19 mil millones de dólares.</p>
8
- <h3>Una aplicación de mensajería gratuita y multiplataforma</h3>
9
- <p>Una de las principales ventajas de WhatsApp es que es una aplicación gratuita, que no requiere ningún tipo de suscripción ni pago para usarla. Solo necesitas tener un número de teléfono válido y una conexión a internet, ya sea por datos móviles o por Wi-Fi. Además, WhatsApp es compatible con diferentes plataformas móviles, como Android, iOS, Windows Phone, BlackBerry OS y otras. También puedes usar WhatsApp en tu ordenador, a través de un navegador web o de una aplicación de escritorio.</p>
10
- <h3>Una forma fácil y cómoda de hablar con tus amigos</h3>
11
- <p>Otra ventaja de WhatsApp es que te permite comunicarte con tus amigos y familiares de forma fácil y cómoda, sin importar dónde se encuentren. WhatsApp te muestra automáticamente todos los contactos que tienen la aplicación instalada en su teléfono, y puedes iniciar un chat individual o grupal con ellos. También puedes realizar llamadas o videollamadas individuales o grupales, con hasta ocho participantes en una misma conversación. Además, puedes compartir tu ubicación, tu estado, tus contactos, GIFs, stickers y emojis con tus contactos.</p>
12
- <h3>Una herramienta segura y con muchas funciones</h3>
13
- <p>Por último, otra ventaja de WhatsApp es que es una herramienta segura y con muchas funciones. WhatsApp utiliza un protocolo de cifrado de extremo a extremo, que significa que solo tú y el destinatario pueden ver el contenido de los mensajes y las llamadas que envías o recibes. Nadie más, ni siquiera WhatsApp o Facebook, puede acceder a ellos. Además, WhatsApp te ofrece la posibilidad de enviar mensajes de voz, crear encuestas, configurar mensajes que se autodestruyen, bloquear contactos indeseados, silenciar chats o grupos, personalizar tu perfil y mucho más.</p>
14
- <p>descargar whatsapp messenger apk gratis<br />
15
- descargar whatsapp messenger apk ultima version<br />
16
- descargar whatsapp messenger apk para android<br />
17
- descargar whatsapp messenger apk mod<br />
18
- descargar whatsapp messenger apk sin play store<br />
19
- descargar whatsapp messenger apk 2023<br />
20
- descargar whatsapp messenger apk para pc<br />
21
- descargar whatsapp messenger apk full<br />
22
- descargar whatsapp messenger apk beta<br />
23
- descargar whatsapp messenger apk uptodown<br />
24
- descargar whatsapp messenger apk antiguo<br />
25
- descargar whatsapp messenger apk para tablet<br />
26
- descargar whatsapp messenger apk mega<br />
27
- descargar whatsapp messenger apk sin conexion<br />
28
- descargar whatsapp messenger apk con stickers<br />
29
- descargar whatsapp messenger apk plus<br />
30
- descargar whatsapp messenger apk para iphone<br />
31
- descargar whatsapp messenger apk pro<br />
32
- descargar whatsapp messenger apk lite<br />
33
- descargar whatsapp messenger apk transparente<br />
34
- descargar whatsapp messenger apk oficial<br />
35
- descargar whatsapp messenger apk desde el sitio web<br />
36
- descargar whatsapp messenger apk con videollamadas<br />
37
- descargar whatsapp messenger apk sin anuncios<br />
38
- descargar whatsapp messenger apk premium<br />
39
- descargar whatsapp messenger apk 2.23.13.6<br />
40
- descargar whatsapp messenger apk para smart tv<br />
41
- descargar whatsapp messenger apk con temas<br />
42
- descargar whatsapp messenger apk con emojis nuevos<br />
43
- descargar whatsapp messenger apk con estados<br />
44
- descargar whatsapp messenger apk sin verificacion de numero<br />
45
- descargar whatsapp messenger apk con respaldo de chats<br />
46
- descargar whatsapp messenger apk con modo oscuro<br />
47
- descargar whatsapp messenger apk con privacidad mejorada<br />
48
- descargar whatsapp messenger apk con mensajes temporales<br />
49
- descargar whatsapp messenger apk con cifrado de extremo a extremo<br />
50
- descargar whatsapp messenger apk con grupos de hasta 256 personas<br />
51
- descargar whatsapp messenger apk con llamadas y mensajes gratis<br />
52
- descargar whatsapp messenger apk con envio de archivos de hasta 100 MB<br />
53
- descargar whatsapp messenger apk con compatibilidad con otras apps de mensajeria<br />
54
- descargar whatsapp messenger apk con notificaciones personalizadas<br />
55
- descargar whatsapp messenger apk con bloqueo de contactos indeseados<br />
56
- descargar whatsapp messenger apk con eliminacion de mensajes enviados por error<br />
57
- descargar whatsapp messenger apk con silenciamiento de chats y grupos <br />
58
- descargar whatsapp messenger apk con marcacion de mensajes como no leidos <br />
59
- descargar whatsapp messenger apk con fijacion de chats favoritos <br />
60
- descargar whatsapp messenger apk con uso compartido de ubicacion en tiempo real <br />
61
- descargar whatsapp messenger apk con creacion de stickers personalizados <br />
62
- descargar whatsapp messenger apk con envio de GIFs y memes <br />
63
- descargar whatsapp messenger apk con acceso a la web y al escritorio</p>
64
- <h2>Cómo descargar el APK de WhatsApp Messenger</h2>
65
- <p>Si quieres descargar el APK de WhatsApp Messenger, debes saber qué es un APK y cómo instalarlo en tu dispositivo. Un APK es un archivo que contiene el paquete de instalación de una aplicación para Android. Normalmente, cuando descargas una aplicación desde la tienda oficial de Google Play, esta se instala automáticamente en tu teléfono. Sin embargo, hay ocasiones en las que puedes querer descargar el APK de una aplicación desde otra fuente, como por ejemplo, una página web o un servicio de almacenamiento en la nube.</p>
66
- <h3>Los requisitos para instalar el APK</h3>
67
- <p>Para poder instalar el APK de WhatsApp Messenger, necesitas cumplir con algunos requisitos previos. Estos son:</p>
68
- <ul>
69
- <li>Tener un teléfono Android con una versión igual o superior a la 4.0.3.</li>
70
- <li>Tener espacio suficiente en la memoria interna o externa del teléfono para guardar el archivo APK.</li>
71
- <li>Tener una conexión a internet estable y segura para descargar el archivo APK.</li>
72
- <li>Permitir la instalación de aplicaciones desde fuentes desconocidas en los ajustes de seguridad del teléfono. Para ello, debes ir a Ajustes > Seguridad > Fuentes desconocidas y activar la opción.</li>
73
- </ul>
74
- <h3>Los pasos para descargar e instalar el APK</h3>
75
- <p>Una vez que hayas cumplido con los requisitos anteriores, puedes seguir estos pasos para descargar e instalar el APK de WhatsApp Messenger:</p>
76
- <ol>
77
- <li>Accede a la página web oficial de WhatsApp y haz clic en el botón Descargar ahora.</li>
78
- <li>Espera a que se descargue el archivo APK en tu teléfono. Puedes ver el progreso de la descarga en la barra de notificaciones.</li>
79
- <li>Cuando se haya completado la descarga, abre el archivo APK desde el gestor de archivos o desde la notificación.</li>
80
- <li>Acepta los permisos que te solicita la aplicación y sigue las instrucciones que aparecen en la pantalla para completar la instalación.</li>
81
- <li>Abre la aplicación de WhatsApp y verifica tu número de teléfono siguiendo los pasos que te indica la aplicación.</li>
82
- <li>Disfruta de WhatsApp Messenger en tu teléfono Android.</li>
83
- </ol>
84
- <h3>Los riesgos y precauciones al usar el APK</h3>
85
- <p>Aunque descargar e instalar el APK de WhatsApp Messenger puede tener algunas ventajas, como por ejemplo, acceder a las últimas actualizaciones antes que nadie o evitar las restricciones geográficas o de compatibilidad, también implica algunos riesgos y precauciones que debes tener en cuenta. Estos son:</p>
86
- <ul>
87
- <li>Al descargar el APK desde una fuente externa a Google Play, puedes exponerte a virus, malware o software malicioso que pueda dañar tu teléfono o comprometer tu información personal.</li>
88
- <li>Al instalar el APK, puedes perder algunas funciones o características de WhatsApp que dependen de los servicios de Google Play, como por ejemplo, las copias de seguridad en Google Drive o las notificaciones push.</li>
89
- <li>Al usar el APK, puedes violar los términos y condiciones de uso de WhatsApp, lo que podría suponer la suspensión o el bloqueo de tu cuenta.</li>
90
- </ul>
91
- <p>Por lo tanto, te recomendamos que solo descargues e instales el APK de WhatsApp Messenger si sabes lo que estás haciendo y si confías en la fuente desde donde lo obtienes. Además, te aconsejamos que mantengas tu teléfono protegido con un antivirus y que hagas copias de seguridad periódicas de tus chats y archivos.</p>
92
- <h2>Qué alternativas hay a WhatsApp Messenger</h2>
93
- <p>Si no quieres descargar el APK de WhatsApp Messenger o si quieres probar otras opciones de mensajería instantánea, existen algunas alternativas a WhatsApp que puedes considerar. Estas son algunas de las más populares y destacadas:</p>
94
- <h3>Signal: una opción más segura y privada</h3>
95
- <p>Signal es una aplicación de mensajería instantánea que se caracteriza por su alto nivel de seguridad y privacidad. Signal utiliza un cifrado de extremo a extremo más avanzado que el de WhatsApp, y no recopila ni almacena ningún dato personal ni metadato de sus usuarios. Además, Signal ofrece funciones como mensajes que se autodestruyen, bloqueo con huella dactilar o código PIN, verificación de seguridad y protección contra capturas de pantalla. Signal es una aplicación gratuita y de código abierto, que puedes descargar desde Google Play o desde su página web oficial.</p>
96
- <h3>Telegram: una opción más versátil y divertida</h3>
97
- <p>Telegram es una aplicación de mensajería instantánea que se distingue por su versatilidad y diversión. Telegram te permite enviar mensajes de texto, voz, imágenes, vídeos, documentos y otros tipos de archivos a tus contactos, así como realizar llamadas y videollamadas gratuitas a través de internet. Además, Telegram cuenta con funciones como chats secretos, bots, canales, grupos con hasta 200 mil miembros, stickers, GIFs, juegos y mucho más. Telegram es una aplicación gratuita y de código abierto, que puedes descargar desde Google Play o desde su página web oficial.</p>
98
- <h3>iMessage: una opción solo para usuarios de Apple</h3>
99
- <p>iMessage es una aplicación de mensajería instantánea que solo está disponible para los usuarios de dispositivos Apple, como iPhone, iPad o Mac. iMessage te permite enviar mensajes de texto, voz, imágenes, vídeos, documentos y otros tipos de archivos a tus contactos que también tengan un dispositivo Apple, así como realizar llamadas y videollamadas gratuitas a través de internet. Además, iMessage cuenta con funciones como efectos de texto e imagen, animojis, memojis, stickers, Apple Pay y mucho más. iMessage es una aplicación gratuita y que viene integrada en el sistema operativo iOS o macOS.</p>
100
- <h2>Conclusión</h2>
101
- <p>En este artículo te hemos explicado cómo y por qué descargar el APK de WhatsApp Messenger, una de las aplicaciones de mensajería más populares y utilizadas en todo el mundo. WhatsApp te ofrece la posibilidad de enviar mensajes y realizar llamadas gratuitas a tus contactos, con un alto nivel de seguridad y privacidad. Sin embargo, descargar el APK de WhatsApp también implica algunos riesgos y precauciones que debes tener en cuenta, como la exposición a virus o malware, la pérdida de funciones o la violación de los términos y condiciones de uso. Por eso, te recomendamos que solo descargues e instales el APK de WhatsApp si sabes lo que estás haciendo y si confías en la fuente desde donde lo obtienes. Además, te hemos presentado algunas alternativas a WhatsApp que puedes considerar, como Signal, Telegram o iMessage, que también te ofrecen servicios similares o mejores que WhatsApp.</p>
102
- <p>Esperamos que este artículo te haya sido útil e interesante. Si tienes alguna duda o comentario sobre el tema, no dudes en dejarnos un mensaje. Y si te ha gustado el artículo, compártelo con tus amigos en las redes sociales. ¡Gracias por leernos!</p>
103
- <h2>Preguntas frecuentes</h2>
104
- <p>A continuación te respondemos algunas preguntas frecuentes sobre el tema de descargar el APK de WhatsApp Messenger:</p>
105
- <h4>¿Qué significa APK?</h4>
106
- <p>APK significa Android Package Kit o Android Application Package. Es un formato de archivo que contiene el paquete de instalación de una aplicación para Android.</p>
107
- <h4>¿Qué ventajas tiene descargar el APK de WhatsApp?</h4>
108
- <p>Descargar el APK de WhatsApp puede tener algunas ventajas, como por ejemplo:</p>
109
- <ul>
110
- <li>Acceder a las últimas actualizaciones antes que nadie.</li>
111
- <li>Evitar las restricciones geográficas o de compatibilidad.</li>
112
- <li>Tener más control sobre la instalación y desinstalación de la aplicación.</li>
113
- </ul>
114
- <h4>¿Qué riesgos tiene descargar el APK de WhatsApp?</h4>
115
- <p>Descargar el APK de WhatsApp puede tener algunos riesgos, como por ejemplo:</p>
116
- <ul>
117
- <li>Exponerse a virus, malware o software malicioso.</li>
118
- <li>Perder algunas funciones o características de WhatsApp.</li>
119
- <li>Violar los términos y condiciones de uso de WhatsApp.</li>
120
- </ul>
121
- <h4>¿Qué alternativas hay a WhatsApp?</h4>
122
- <p>Algunas alternativas a WhatsApp son:</p>
123
- <ul>
124
- <li>Signal: una opción más segura y privada.</li>
125
- <li>Telegram: una opción más versátil y divertida.</li>
126
- <li>iMessage: una opción solo para usuarios de Apple.</li>
127
- </ul>
128
- <h4>¿Cómo puedo descargar el APK de WhatsApp?</h4>
129
- <p>Puedes descargar el APK de WhatsApp siguiendo estos pasos:</p>
130
- <ol>
131
- <li>Accede a la página web oficial de WhatsApp y haz clic en el botón Descargar ahora.</li>
132
- <li>Espera a que se descargue el archivo APK en tu teléfono. Puedes ver el progreso de la descarga en la barra de notificaciones.</li>
133
- <li>Cuando se haya completado la descarga, abre el archivo APK desde el gestor de archivos o desde la notificación.</li>
134
- <li>Acepta los permisos que te solicita la aplicación y sigue las instrucciones que aparecen en la pantalla para completar la instalación.</li>
135
- <li>Abre la aplicación de WhatsApp y verifica tu número de teléfono siguiendo los pasos que te indica la aplicación.</li>
136
- </ol>
137
- <p>Si tienes algún problema o duda al descargar o instalar el APK de WhatsApp, puedes consultar la sección de ayuda de la página web oficial de WhatsApp o contactar con su servicio de atención al cliente.</p> 197e85843d<br />
138
- <br />
139
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Become a Football Legend with Vive le Football for Android.md DELETED
@@ -1,110 +0,0 @@
1
-
2
- <h1>Free Download Vive le Football: A Guide to the Ultimate Football Game</h1>
3
- <p>If you are a fan of football (or soccer, as some may call it), you must have heard of Vive le Football, the latest sports game from NetEase Games. This game promises to deliver the most realistic and immersive football experience on your devices, with stunning graphics, licensed teams and players, tactical gameplay, and online and offline modes. In this article, we will show you how to download Vive le Football for free on your PC, Mac, Android, or iOS device, and what are the features and benefits of playing this game.</p>
4
- <h2>free download vive le football</h2><br /><p><b><b>DOWNLOAD</b> &#10040; <a href="https://jinyurl.com/2uNPL7">https://jinyurl.com/2uNPL7</a></b></p><br /><br />
5
- <h2>What is Vive le Football?</h2>
6
- <p>Vive le Football is a football simulation game developed by NetEase Games, one of the leading game developers in China. The game was released in September 2022, and has received positive reviews from players and critics alike. The game aims to provide a realistic and authentic football experience, with licenses from FIFPro and the Chinese national team, as well as other famous clubs and leagues from around the world. The game also features advanced physics and graphics engine, smart AI, and various game modes to suit different preferences and skill levels.</p>
7
- <h3>Features of Vive le Football</h3>
8
- <p>Here are some of the main features of Vive le Football that make it stand out from other football games:</p>
9
- <h4>Realistic graphics and physics</h4>
10
- <p>Vive le Football uses Unreal Engine 4 to create stunning visuals and animations for the game. The game also uses a realistic physics system that simulates the movement and collision of the ball, players, and environment. You can see the sweat, dirt, grass, and weather effects on the players and the pitch, as well as the facial expressions and emotions of the players. The game also supports 4K resolution and 60 FPS for a smooth and immersive gameplay.</p>
11
- <p>How to free download vive le football for android<br />
12
- Free download vive le football apk latest version<br />
13
- Vive le football free mobile football management game<br />
14
- Free download vive le football mod apk unlimited money<br />
15
- Best tips and tricks for free download vive le football<br />
16
- Free download vive le football offline mode<br />
17
- Vive le football review: a free and realistic football game<br />
18
- Free download vive le football for PC windows 10<br />
19
- Vive le football cheats: how to get free coins and gems<br />
20
- Free download vive le football hack tool no survey<br />
21
- Vive le football gameplay: how to play and win matches<br />
22
- Free download vive le football for iOS devices<br />
23
- Vive le football features: what makes it different from other games<br />
24
- Free download vive le football update: what's new and improved<br />
25
- Vive le football guide: how to build your dream team<br />
26
- Free download vive le football online multiplayer mode<br />
27
- Vive le football ratings: how does it compare to other games<br />
28
- Free download vive le football for mac os x<br />
29
- Vive le football support: how to contact the developers and get help<br />
30
- Free download vive le football beta version: how to join and test the game<br />
31
- Vive le football community: how to connect with other players and fans<br />
32
- Free download vive le football for android tv<br />
33
- Vive le football news: what's the latest information and announcements<br />
34
- Free download vive le football for firestick<br />
35
- Vive le football forum: where to discuss and share your opinions<br />
36
- Free download vive le football for chromebook<br />
37
- Vive le football wiki: where to find all the information and resources<br />
38
- Free download vive le football for linux<br />
39
- Vive le football blog: where to read and write about the game<br />
40
- Free download vive le football for roku<br />
41
- Vive le football podcast: where to listen and learn about the game<br />
42
- Free download vive le football for smart tv<br />
43
- Vive le football video: where to watch and enjoy the game<br />
44
- Free download vive le football for xbox one<br />
45
- Vive le football social media: where to follow and interact with the game<br />
46
- Free download vive le football for ps4<br />
47
- Vive le football merchandise: where to buy and show your support<br />
48
- Free download vive le football for nintendo switch<br />
49
- Vive le football events: where to join and participate in the game activities<br />
50
- Free download vive le football for windows phone</p>
51
- <h4>Licensed teams and players</h4>
52
- <p>Vive le Football has official licenses from FIFPro and the Chinese national team, as well as other popular clubs and leagues from Europe, Asia, America, and Africa. You can play with or against some of the best players in the world, such as Cristiano Ronaldo, Lionel Messi, Neymar Jr., Kylian Mbappé, Mohamed Salah, Harry Kane, Robert Lewandowski, Kevin De Bruyne, Sergio Ramos, Virgil van Dijk, Manuel Neuer, Alisson Becker, and many more. You can also customize your own team with your favorite players, kits, badges, stadiums, and sponsors.</p>
53
- <h4>Tactical gameplay and AI</h4>
54
- <p>Vive le Football is not just about scoring goals. It is also about creating strategies and tactics to outsmart your opponents. You can choose from different formations, styles, roles, and instructions for your team, as well as adjust them during the match. You can also use various skills and tricks to dribble past defenders, pass accurately, shoot powerfully, tackle cleanly, save brilliantly, and more. The game also features a smart AI that adapts to your actions and decisions, making each match challenging and unpredictable.</p>
55
- <h4>Online and offline modes</h4>
56
- <p>Vive le Football offers various game modes for you to enjoy. You can play online with or against other players from around the world in real-time matches or tournaments. You can also join or create your own club with your friends or other players, and compete for glory and rewards in club leagues or cups. You can also play offline in single-player mode or local multiplayer mode with your friends or family on the same device. You can also play career mode or manager mode to experience the life of a professional football player or a football manager. You can also play training mode or challenge mode to improve your skills and test your limits.</p>
57
- <h2>How to download Vive le Football for free?</h2>
58
- <p>Vive le Football is a free-to-play game that you can download and play on your PC, Mac, Android, or iOS device. Here are the steps to download Vive le Football for free on different platforms:</p>
59
- <h3>Download Vive le Football on PC & Mac with BlueStacks</h3>
60
- <p>If you want to play Vive le Football on your PC or Mac, you can use BlueStacks, a popular Android emulator that allows you to run Android apps and games on your computer. BlueStacks is free, safe, and easy to use, and it offers many features and benefits for gamers.</p>
61
- <h4>Steps to install BlueStacks and Vive le Football</h4>
62
- <p>Here are the steps to install BlueStacks and Vive le Football on your PC or Mac:</p>
63
- <ol>
64
- <li>Go to the official website of BlueStacks and download the latest version of the emulator for your PC or Mac.</li>
65
- <li>Run the installer and follow the instructions to complete the installation process.</li>
66
- <li>Launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free.</li>
67
- <li>Go to the Google Play Store app on BlueStacks and search for Vive le Football. Alternatively, you can use this link to go directly to the game page.</li>
68
- <li>Click on the Install button and wait for the game to download and install on BlueStacks.</li>
69
- <li>Once the installation is done, you can find the game icon on the home screen of BlueStacks. Click on it to launch the game and enjoy playing Vive le Football on your PC or Mac.</li>
70
- </ol>
71
- <h4>Benefits of playing Vive le Football on PC & Mac</h4>
72
- <p>Here are some of the benefits of playing Vive le Football on PC & Mac with BlueStacks:</p>
73
- <ul>
74
- <li>You can play Vive le Football on a bigger screen with better resolution and graphics quality.</li>
75
- <li>You can use your keyboard and mouse or a gamepad to control the game more easily and precisely.</li>
76
- <li>You can customize the key mapping and settings according to your preference and comfort.</li>
77
- <li>You can record your gameplay, take screenshots, stream live, chat with other players, and access other features of BlueStacks.</li>
78
- <li>You can run multiple instances of BlueStacks and play Vive le Football with different accounts or devices simultaneously.</li>
79
- </ul>
80
- <h3>Download Vive le Football on Android and iOS devices</h3>
81
- <p>If you want to play Vive le Football on your Android or iOS device, you can download it from the Google Play Store or the App Store respectively. The game is compatible with most of the modern smartphones and tablets that meet the minimum requirements.</p>
82
- <h4>Steps to download Vive le Football from Google Play Store or App Store</h4>
83
- <p>Here are the steps to download Vive le Football from Google Play Store or App Store on your Android or iOS device:</p>
84
- <ol>
85
- <li>Open the Google Play Store app or the App Store app on your device.</li>
86
- <li>Search for Vive le Football in the search bar. Alternatively, you can use these links to go directly to the game page.</li>
87
- <li>Tap on the Install button (for Android) or the Get button (for iOS) and wait for the game to download and install on your device.</li>
88
- <li>Once the installation is done, you can find the game icon on your device's home screen or app drawer. Tap on it to launch the game and enjoy playing Vive le Football on your device.</li>
89
- </ol>
90
- <h4>Requirements and compatibility of Vive le Football on mobile devices</h4>
91
- <p>Here are the minimum requirements and compatibility of Vive le Football on mobile devices:</p>
92
- | Platform | OS Version | RAM | Storage | | Android | 5.0 or higher | 2 GB or higher | 1 GB or higher | | iOS | 10.0 or higher | 2 GB or higher | 1 GB or higher | <p>Note: These are the minimum requirements for running the game smoothly. The actual performance may vary depending on your device's specifications and settings.</p>
93
- <h2>Conclusion</h2>
94
- <p>Vive le Football is a football simulation game that offers a realistic and immersive football experience on your devices. You can play with licensed teams and players, create your own strategies and tactics, use various skills and tricks, and compete in online and offline modes. You can download Vive le Football for free on your PC, Mac, Android, or iOS device by following the steps mentioned above. If you are a football lover , you should definitely give Vive le Football a try and see for yourself why it is one of the best football games in the market.</p>
95
- <h3>FAQs</h3>
96
- <p>Here are some of the frequently asked questions about Vive le Football:</p>
97
- <ol>
98
- <li><b>Is Vive le Football free to play?</b></li>
99
- <p>Yes, Vive le Football is free to play. However, the game may contain some optional in-app purchases that can enhance your gameplay or unlock some premium features.</p>
100
- <li><b>Is Vive le Football online or offline?</b></li>
101
- <p>Vive le Football supports both online and offline modes. You can play online with or against other players from around the world, or play offline in single-player mode or local multiplayer mode with your friends or family on the same device.</p>
102
- <li><b>How can I update Vive le Football?</b></li>
103
- <p>Vive le Football is regularly updated with new features, content, and improvements. You can update the game by going to the Google Play Store app or the App Store app on your device and checking for any available updates. Alternatively, you can also enable automatic updates for the game in your device's settings.</p>
104
- <li><b>How can I contact the support team of Vive le Football?</b></li>
105
- <p>If you have any issues, questions, or feedback regarding Vive le Football, you can contact the support team of NetEase Games by going to the game's settings and tapping on the Customer Service button. You can also visit the official website or the social media pages of NetEase Games for more information and updates.</p>
106
- <li><b>How can I play Vive le Football on PC & Mac?</b></li>
107
- <p>If you want to play Vive le Football on PC & Mac, you can use BlueStacks, a popular Android emulator that allows you to run Android apps and games on your computer. You can download BlueStacks from its official website and follow the steps mentioned above to install and play Vive le Football on your PC & Mac.</p>
108
- </ol></p> 197e85843d<br />
109
- <br />
110
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/2ndelement/voicevox/voicevox_engine/engine_manifest/EngineManifestLoader.py DELETED
@@ -1,46 +0,0 @@
1
- import json
2
- from base64 import b64encode
3
- from pathlib import Path
4
-
5
- from .EngineManifest import EngineManifest, LicenseInfo, UpdateInfo
6
-
7
-
8
- class EngineManifestLoader:
9
- def __init__(self, manifest_path: Path, root_dir: Path):
10
- self.manifest_path = manifest_path
11
- self.root_dir = root_dir
12
-
13
- def load_manifest(self) -> EngineManifest:
14
- manifest = json.loads(self.manifest_path.read_text(encoding="utf-8"))
15
-
16
- manifest = EngineManifest(
17
- manifest_version=manifest["manifest_version"],
18
- name=manifest["name"],
19
- brand_name=manifest["brand_name"],
20
- uuid=manifest["uuid"],
21
- url=manifest["url"],
22
- default_sampling_rate=manifest["default_sampling_rate"],
23
- icon=b64encode((self.root_dir / manifest["icon"]).read_bytes()).decode(
24
- "utf-8"
25
- ),
26
- terms_of_service=(self.root_dir / manifest["terms_of_service"]).read_text(
27
- "utf-8"
28
- ),
29
- update_infos=[
30
- UpdateInfo(**update_info)
31
- for update_info in json.loads(
32
- (self.root_dir / manifest["update_infos"]).read_text("utf-8")
33
- )
34
- ],
35
- dependency_licenses=[
36
- LicenseInfo(**license_info)
37
- for license_info in json.loads(
38
- (self.root_dir / manifest["dependency_licenses"]).read_text("utf-8")
39
- )
40
- ],
41
- supported_features={
42
- key: item["value"]
43
- for key, item in manifest["supported_features"].items()
44
- },
45
- )
46
- return manifest
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/modules/sadtalker_test.py DELETED
@@ -1,118 +0,0 @@
1
- import torch
2
- import os, sys, shutil
3
- from src.utils.preprocess import CropAndExtract
4
- from src.test_audio2coeff import Audio2Coeff
5
- from src.facerender.animate import AnimateFromCoeff
6
- from src.generate_batch import get_data
7
- from src.generate_facerender_batch import get_facerender_data
8
- import uuid
9
-
10
- from pydub import AudioSegment
11
-
12
- def mp3_to_wav(mp3_filename,wav_filename,frame_rate):
13
- mp3_file = AudioSegment.from_file(file=mp3_filename)
14
- mp3_file.set_frame_rate(frame_rate).export(wav_filename,format="wav")
15
-
16
- from modules.text2speech import text2speech
17
-
18
- class SadTalker():
19
-
20
- def __init__(self, checkpoint_path='checkpoints'):
21
-
22
- if torch.cuda.is_available() :
23
- device = "cuda"
24
- else:
25
- device = "cpu"
26
-
27
- # current_code_path = sys.argv[0]
28
- # modules_path = os.path.split(current_code_path)[0]
29
-
30
- current_root_path = './'
31
-
32
- os.environ['TORCH_HOME']=os.path.join(current_root_path, 'checkpoints')
33
-
34
- path_of_lm_croper = os.path.join(current_root_path, 'checkpoints', 'shape_predictor_68_face_landmarks.dat')
35
- path_of_net_recon_model = os.path.join(current_root_path, 'checkpoints', 'epoch_20.pth')
36
- dir_of_BFM_fitting = os.path.join(current_root_path, 'checkpoints', 'BFM_Fitting')
37
- wav2lip_checkpoint = os.path.join(current_root_path, 'checkpoints', 'wav2lip.pth')
38
-
39
- audio2pose_checkpoint = os.path.join(current_root_path, 'checkpoints', 'auido2pose_00140-model.pth')
40
- audio2pose_yaml_path = os.path.join(current_root_path, 'config', 'auido2pose.yaml')
41
-
42
- audio2exp_checkpoint = os.path.join(current_root_path, 'checkpoints', 'auido2exp_00300-model.pth')
43
- audio2exp_yaml_path = os.path.join(current_root_path, 'config', 'auido2exp.yaml')
44
-
45
- free_view_checkpoint = os.path.join(current_root_path, 'checkpoints', 'facevid2vid_00189-model.pth.tar')
46
- mapping_checkpoint = os.path.join(current_root_path, 'checkpoints', 'mapping_00229-model.pth.tar')
47
- facerender_yaml_path = os.path.join(current_root_path, 'config', 'facerender.yaml')
48
-
49
- #init model
50
- print(path_of_lm_croper)
51
- self.preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device)
52
-
53
- print(audio2pose_checkpoint)
54
- self.audio_to_coeff = Audio2Coeff(audio2pose_checkpoint, audio2pose_yaml_path,
55
- audio2exp_checkpoint, audio2exp_yaml_path, wav2lip_checkpoint, device)
56
- print(free_view_checkpoint)
57
- self.animate_from_coeff = AnimateFromCoeff(free_view_checkpoint, mapping_checkpoint,
58
- facerender_yaml_path, device)
59
- self.device = device
60
-
61
- def test(self, source_image, driven_audio, still_mode, resize_mode, use_enhancer, result_dir='./'):
62
-
63
- time_tag = str(uuid.uuid4()) # strftime("%Y_%m_%d_%H.%M.%S")
64
- save_dir = os.path.join(result_dir, time_tag)
65
- os.makedirs(save_dir, exist_ok=True)
66
-
67
- input_dir = os.path.join(save_dir, 'input')
68
- os.makedirs(input_dir, exist_ok=True)
69
-
70
- print(source_image)
71
- pic_path = os.path.join(input_dir, os.path.basename(source_image))
72
- shutil.move(source_image, input_dir)
73
-
74
- if os.path.isfile(driven_audio):
75
- audio_path = os.path.join(input_dir, os.path.basename(driven_audio))
76
-
77
- #### mp3 to wav
78
- if '.mp3' in audio_path:
79
- mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000)
80
- audio_path = audio_path.replace('.mp3', '.wav')
81
- else:
82
- shutil.move(driven_audio, input_dir)
83
- else:
84
- text2speech
85
-
86
-
87
- os.makedirs(save_dir, exist_ok=True)
88
- pose_style = 0
89
- #crop image and extract 3dmm from image
90
- first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
91
- os.makedirs(first_frame_dir, exist_ok=True)
92
- first_coeff_path, crop_pic_path, original_size = self.preprocess_model.generate(pic_path, first_frame_dir, crop_or_resize= 'resize' if resize_mode else 'crop')
93
- if first_coeff_path is None:
94
- raise AttributeError("No face is detected")
95
-
96
- #audio2ceoff
97
- batch = get_data(first_coeff_path, audio_path, self.device)
98
- coeff_path = self.audio_to_coeff.generate(batch, save_dir, pose_style)
99
- #coeff2video
100
- batch_size = 4
101
- data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode)
102
- self.animate_from_coeff.generate(data, save_dir, enhancer='gfpgan' if use_enhancer else None, original_size=original_size)
103
- video_name = data['video_name']
104
- print(f'The generated video is named {video_name} in {save_dir}')
105
-
106
- torch.cuda.empty_cache()
107
- torch.cuda.synchronize()
108
-
109
- import gc; gc.collect()
110
-
111
- if use_enhancer:
112
- return os.path.join(save_dir, video_name+'_enhanced.mp4'), os.path.join(save_dir, video_name+'_enhanced.mp4')
113
-
114
- else:
115
- return os.path.join(save_dir, video_name+'.mp4'), os.path.join(save_dir, video_name+'.mp4')
116
-
117
-
118
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/speed.py DELETED
@@ -1,23 +0,0 @@
1
- from easydict import EasyDict as edict
2
-
3
- # configs for test speed
4
-
5
- config = edict()
6
- config.loss = "arcface"
7
- config.network = "r50"
8
- config.resume = False
9
- config.output = None
10
- config.embedding_size = 512
11
- config.sample_rate = 1.0
12
- config.fp16 = True
13
- config.momentum = 0.9
14
- config.weight_decay = 5e-4
15
- config.batch_size = 128
16
- config.lr = 0.1 # batch size is 512
17
-
18
- config.rec = "synthetic"
19
- config.num_classes = 100 * 10000
20
- config.num_epoch = 30
21
- config.warmup_epoch = -1
22
- config.decay_epoch = [10, 16, 22]
23
- config.val_targets = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_537238KB.py DELETED
@@ -1,123 +0,0 @@
1
- import torch
2
- import numpy as np
3
- from torch import nn
4
- import torch.nn.functional as F
5
-
6
- from . import layers_537238KB as layers
7
-
8
-
9
- class BaseASPPNet(nn.Module):
10
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
11
- super(BaseASPPNet, self).__init__()
12
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
13
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
14
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
15
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
16
-
17
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
18
-
19
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
20
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
21
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
22
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
23
-
24
- def __call__(self, x):
25
- h, e1 = self.enc1(x)
26
- h, e2 = self.enc2(h)
27
- h, e3 = self.enc3(h)
28
- h, e4 = self.enc4(h)
29
-
30
- h = self.aspp(h)
31
-
32
- h = self.dec4(h, e4)
33
- h = self.dec3(h, e3)
34
- h = self.dec2(h, e2)
35
- h = self.dec1(h, e1)
36
-
37
- return h
38
-
39
-
40
- class CascadedASPPNet(nn.Module):
41
- def __init__(self, n_fft):
42
- super(CascadedASPPNet, self).__init__()
43
- self.stg1_low_band_net = BaseASPPNet(2, 64)
44
- self.stg1_high_band_net = BaseASPPNet(2, 64)
45
-
46
- self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
47
- self.stg2_full_band_net = BaseASPPNet(32, 64)
48
-
49
- self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0)
50
- self.stg3_full_band_net = BaseASPPNet(64, 128)
51
-
52
- self.out = nn.Conv2d(128, 2, 1, bias=False)
53
- self.aux1_out = nn.Conv2d(64, 2, 1, bias=False)
54
- self.aux2_out = nn.Conv2d(64, 2, 1, bias=False)
55
-
56
- self.max_bin = n_fft // 2
57
- self.output_bin = n_fft // 2 + 1
58
-
59
- self.offset = 128
60
-
61
- def forward(self, x, aggressiveness=None):
62
- mix = x.detach()
63
- x = x.clone()
64
-
65
- x = x[:, :, : self.max_bin]
66
-
67
- bandw = x.size()[2] // 2
68
- aux1 = torch.cat(
69
- [
70
- self.stg1_low_band_net(x[:, :, :bandw]),
71
- self.stg1_high_band_net(x[:, :, bandw:]),
72
- ],
73
- dim=2,
74
- )
75
-
76
- h = torch.cat([x, aux1], dim=1)
77
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
78
-
79
- h = torch.cat([x, aux1, aux2], dim=1)
80
- h = self.stg3_full_band_net(self.stg3_bridge(h))
81
-
82
- mask = torch.sigmoid(self.out(h))
83
- mask = F.pad(
84
- input=mask,
85
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
86
- mode="replicate",
87
- )
88
-
89
- if self.training:
90
- aux1 = torch.sigmoid(self.aux1_out(aux1))
91
- aux1 = F.pad(
92
- input=aux1,
93
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
94
- mode="replicate",
95
- )
96
- aux2 = torch.sigmoid(self.aux2_out(aux2))
97
- aux2 = F.pad(
98
- input=aux2,
99
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
100
- mode="replicate",
101
- )
102
- return mask * mix, aux1 * mix, aux2 * mix
103
- else:
104
- if aggressiveness:
105
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
106
- mask[:, :, : aggressiveness["split_bin"]],
107
- 1 + aggressiveness["value"] / 3,
108
- )
109
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
110
- mask[:, :, aggressiveness["split_bin"] :],
111
- 1 + aggressiveness["value"],
112
- )
113
-
114
- return mask * mix
115
-
116
- def predict(self, x_mag, aggressiveness=None):
117
- h = self.forward(x_mag, aggressiveness)
118
-
119
- if self.offset > 0:
120
- h = h[:, :, :, self.offset : -self.offset]
121
- assert h.size()[3] > 0
122
-
123
- return h
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AB-TW/team-ai/documents/bussiness_context/business_context.md DELETED
@@ -1,19 +0,0 @@
1
- AB测试系统中的配置管理是该系统的一个重要功能,其中主要涉及两个业务概念:FeatureFlag 和 FeatureConfig。
2
-
3
- FeatureFlag 用于标识某个具体 Feature,其主要属性包括 featureKey(Feature 标识)、名称、描述、enabled、创建时间、最后更新时间和 template。其中,template 作为 FeatureConfig 的模板,用于生成后续 FeatureConfig 的配置界面组件,其属性包括 key、名称、描述、dataType 和 items。其中,dataType 为枚举值,取值范围为 STRING、NUMBER、BOOLEAN、OBJECT 和 ARRAY。
4
-
5
- FeatureConfig 用于配置某个 Feature 中控制前端展示效果的配置项,其主要属性包括 featureKey(Feature 标识)、data(配置数据)、saData(埋点数据)、status、标题、描述、创建时间和更新时间。其中,status 为枚举值,取值范围为 DRAFT、PUBLISHED 和 DISABLED。新增的 FeatureConfig 状态为 DRAFT,执行发布操作后变为 PUBLISHED,执行撤销操作后变为 DISABLED。一个 FeatureFlag 中可以包含多个 FeatureConfig,通过 featureKey 字段进行关联。
6
-
7
- 添加 FeatureConfig 的主要目的是为了控制 FeatureConfig 消费方的某个行为。在添加 FeatureConfig 时,应该包含 featureKey、data、saData、status、标题和描述信息。新增的 FeatureConfig 状态为 DRAFT。
8
-
9
- 客户端用户需要查看 FeatureConfig 中的 data、saData、更新时间和 id。同时,FeatureConfig 可以关联圈人条件,符合圈人条件的配置可以展示给客户端用户。客户端用户仅能查看符合圈人条件的 PUBLISHED 状态的数据。圈人条件包括上传用户白名单、按照比例灰度发布、地理位置和人群标签等。
10
-
11
- 在添加 FeatureConfig 时,应该包含 featureKey、data、saData、status、标题和描述信息。新增的 FeatureConfig 状态为 DRAFT。
12
-
13
- 客户端用户通过客户端访问服务端接口获取FeatureConfig,客户端通过FeatureConfig控制相关Feature展示
14
-
15
- 用户白名单圈人条件需要上传用户id的白名单,仅在白名单里的用户可以获取到相关feature
16
- 地理位置配置端需要设置圈定地区的地理位置编号列表,客户端请求接口是传递地理位置编号参数,位置编号匹配的数据用户可见
17
-
18
-
19
- 新增实验需要提供实验名称、目标、分组信息(包括分组标识、描述、比例)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AEUPH/SENTIENCE_PROGRAMMING_LANGUAGE/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: SENTIENCE PROGRAMMING LANGUAGE
3
- emoji: 💻
4
- colorFrom: purple
5
- colorTo: green
6
- sdk: static
7
- pinned: false
8
- license: cc
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AI-Hobbyist/Hoyo-RVC/i18n/locale_diff.py DELETED
@@ -1,45 +0,0 @@
1
- import json
2
- import os
3
- from collections import OrderedDict
4
-
5
- # Define the standard file name
6
- standard_file = "zh_CN.json"
7
-
8
- # Find all JSON files in the directory
9
- dir_path = "./"
10
- languages = [
11
- f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file
12
- ]
13
-
14
- # Load the standard file
15
- with open(standard_file, "r", encoding="utf-8") as f:
16
- standard_data = json.load(f, object_pairs_hook=OrderedDict)
17
-
18
- # Loop through each language file
19
- for lang_file in languages:
20
- # Load the language file
21
- with open(lang_file, "r", encoding="utf-8") as f:
22
- lang_data = json.load(f, object_pairs_hook=OrderedDict)
23
-
24
- # Find the difference between the language file and the standard file
25
- diff = set(standard_data.keys()) - set(lang_data.keys())
26
-
27
- miss = set(lang_data.keys()) - set(standard_data.keys())
28
-
29
- # Add any missing keys to the language file
30
- for key in diff:
31
- lang_data[key] = key
32
-
33
- # Del any extra keys to the language file
34
- for key in miss:
35
- del lang_data[key]
36
-
37
- # Sort the keys of the language file to match the order of the standard file
38
- lang_data = OrderedDict(
39
- sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0]))
40
- )
41
-
42
- # Save the updated language file
43
- with open(lang_file, "w", encoding="utf-8") as f:
44
- json.dump(lang_data, f, ensure_ascii=False, indent=4)
45
- f.write("\n")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/docs/CONDITIONING.md DELETED
@@ -1,146 +0,0 @@
1
- # AudioCraft conditioning modules
2
-
3
- AudioCraft provides a
4
- [modular implementation of conditioning modules](../audiocraft/modules/conditioners.py)
5
- that can be used with the language model to condition the generation.
6
- The codebase was developed in order to easily extend the set of modules
7
- currently supported to easily develop new ways of controlling the generation.
8
-
9
-
10
- ## Conditioning methods
11
-
12
- For now, we support 3 main types of conditioning within AudioCraft:
13
- * Text-based conditioning methods
14
- * Waveform-based conditioning methods
15
- * Joint embedding conditioning methods for text and audio projected in a shared latent space.
16
-
17
- The Language Model relies on 2 core components that handle processing information:
18
- * The `ConditionProvider` class, that maps metadata to processed conditions leveraging
19
- all the defined conditioners for the given task.
20
- * The `ConditionFuser` class, that takes preprocessed conditions and properly fuse the
21
- conditioning embedding to the language model inputs following a given fusing strategy.
22
-
23
- Different conditioners (for text, waveform, joint embeddings...) are provided as torch
24
- modules in AudioCraft and are used internally in the language model to process the
25
- conditioning signals and feed them to the language model.
26
-
27
-
28
- ## Core concepts
29
-
30
- ### Conditioners
31
-
32
- The `BaseConditioner` torch module is the base implementation for all conditioners in audiocraft.
33
-
34
- Each conditioner is expected to implement 2 methods:
35
- * The `tokenize` method that is used as a preprocessing method that contains all processing
36
- that can lead to synchronization points (e.g. BPE tokenization with transfer to the GPU).
37
- The output of the tokenize method will then be used to feed the forward method.
38
- * The `forward` method that takes the output of the tokenize method and contains the core computation
39
- to obtain the conditioning embedding along with a mask indicating valid indices (e.g. padding tokens).
40
-
41
- ### ConditionProvider
42
-
43
- The ConditionProvider prepares and provides conditions given a dictionary of conditioners.
44
-
45
- Conditioners are specified as a dictionary of attributes and the corresponding conditioner
46
- providing the processing logic for the given attribute.
47
-
48
- Similarly to the conditioners, the condition provider works in two steps to avoid sychronization points:
49
- * A `tokenize` method that takes a list of conditioning attributes for the batch,
50
- and run all tokenize steps for the set of conditioners.
51
- * A `forward` method that takes the output of the tokenize step and run all the forward steps
52
- for the set of conditioners.
53
-
54
- The list of conditioning attributes is passed as a list of `ConditioningAttributes`
55
- that is presented just below.
56
-
57
- ### ConditionFuser
58
-
59
- Once all conditioning signals have been extracted and processed by the `ConditionProvider`
60
- as dense embeddings, they remain to be passed to the language model along with the original
61
- language model inputs.
62
-
63
- The `ConditionFuser` handles specifically the logic to combine the different conditions
64
- to the actual model input, supporting different strategies to combine them.
65
-
66
- One can therefore define different strategies to combine or fuse the condition to the input, in particular:
67
- * Prepending the conditioning signal to the input with the `prepend` strategy,
68
- * Summing the conditioning signal to the input with the `sum` strategy,
69
- * Combining the conditioning relying on a cross-attention mechanism with the `cross` strategy,
70
- * Using input interpolation with the `input_interpolate` strategy.
71
-
72
- ### SegmentWithAttributes and ConditioningAttributes: From metadata to conditions
73
-
74
- The `ConditioningAttributes` dataclass is the base class for metadata
75
- containing all attributes used for conditioning the language model.
76
-
77
- It currently supports the following types of attributes:
78
- * Text conditioning attributes: Dictionary of textual attributes used for text-conditioning.
79
- * Wav conditioning attributes: Dictionary of waveform attributes used for waveform-based
80
- conditioning such as the chroma conditioning.
81
- * JointEmbed conditioning attributes: Dictionary of text and waveform attributes
82
- that are expected to be represented in a shared latent space.
83
-
84
- These different types of attributes are the attributes that are processed
85
- by the different conditioners.
86
-
87
- `ConditioningAttributes` are extracted from metadata loaded along the audio in the datasets,
88
- provided that the metadata used by the dataset implements the `SegmentWithAttributes` abstraction.
89
-
90
- All metadata-enabled datasets to use for conditioning in AudioCraft inherits
91
- the [`audiocraft.data.info_dataset.InfoAudioDataset`](../audiocraft/data/info_audio_dataset.py) class
92
- and the corresponding metadata inherits and implements the `SegmentWithAttributes` abstraction.
93
- Refer to the [`audiocraft.data.music_dataset.MusicAudioDataset`](../audiocraft/data/music_dataset.py)
94
- class as an example.
95
-
96
-
97
- ## Available conditioners
98
-
99
- ### Text conditioners
100
-
101
- All text conditioners are expected to inherit from the `TextConditioner` class.
102
-
103
- AudioCraft currently provides two text conditioners:
104
- * The `LUTConditioner` that relies on look-up-table of embeddings learned at train time,
105
- and relying on either no tokenizer or a spacy tokenizer. This conditioner is particularly
106
- useful for simple experiments and categorical labels.
107
- * The `T5Conditioner` that relies on a
108
- [pre-trained T5 model](https://huggingface.co/docs/transformers/model_doc/t5)
109
- frozen or fine-tuned at train time to extract the text embeddings.
110
-
111
- ### Waveform conditioners
112
-
113
- All waveform conditioners are expected to inherit from the `WaveformConditioner` class and
114
- consists of conditioning method that takes a waveform as input. The waveform conditioner
115
- must implement the logic to extract the embedding from the waveform and define the downsampling
116
- factor from the waveform to the resulting embedding.
117
-
118
- The `ChromaStemConditioner` conditioner is a waveform conditioner for the chroma features
119
- conditioning used by MusicGen. It takes a given waveform, extract relevant stems for melody
120
- (namely all non drums and bass stems) using a
121
- [pre-trained Demucs model](https://github.com/facebookresearch/demucs)
122
- and then extract the chromagram bins from the remaining mix of stems.
123
-
124
- ### Joint embeddings conditioners
125
-
126
- We finally provide support for conditioning based on joint text and audio embeddings through
127
- the `JointEmbeddingConditioner` class and the `CLAPEmbeddingConditioner` that implements such
128
- a conditioning method relying on a [pretrained CLAP model](https://github.com/LAION-AI/CLAP).
129
-
130
- ## Classifier Free Guidance
131
-
132
- We provide a Classifier Free Guidance implementation in AudioCraft. With the classifier free
133
- guidance dropout, all attributes are dropped with the same probability.
134
-
135
- ## Attribute Dropout
136
-
137
- We further provide an attribute dropout strategy. Unlike the classifier free guidance dropout,
138
- the attribute dropout drops given attributes with a defined probability, allowing the model
139
- not to expect all conditioning signals to be provided at once.
140
-
141
- ## Faster computation of conditions
142
-
143
- Conditioners that require some heavy computation on the waveform can be cached, in particular
144
- the `ChromaStemConditioner` or `CLAPEmbeddingConditioner`. You just need to provide the
145
- `cache_path` parameter to them. We recommend running dummy jobs for filling up the cache quickly.
146
- An example is provied in the [musicgen.musicgen_melody_32khz grid](../audiocraft/grids/musicgen/musicgen_melody_32khz.py).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/__init__.py DELETED
@@ -1,2 +0,0 @@
1
- from .fused_act import FusedLeakyReLU, fused_leaky_relu
2
- from .upfirdn2d import upfirdn2d
 
 
 
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/remove_optimizer.py DELETED
@@ -1,18 +0,0 @@
1
- import argparse
2
- import torch
3
-
4
-
5
- def main(checkpoint):
6
- state_dict = torch.load(checkpoint, map_location="cpu")
7
- if "optimizer" in state_dict:
8
- del state_dict["optimizer"]
9
- if "lr_scheduler" in state_dict:
10
- del state_dict["lr_scheduler"]
11
- torch.save(state_dict, checkpoint)
12
-
13
-
14
- if __name__ == "__main__":
15
- parser = argparse.ArgumentParser()
16
- parser.add_argument("checkpoint", type=str)
17
- args = parser.parse_args()
18
- main(args.checkpoint)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIML-TUDA/safe-stable-diffusion/app.py DELETED
@@ -1,349 +0,0 @@
1
- import gradio as gr
2
- # import torch
3
- # from torch import autocast
4
- # from diffusers import StableDiffusionPipeline
5
- from datasets import load_dataset
6
- from PIL import Image
7
- from io import BytesIO
8
- # import base64
9
- # import re
10
- import os
11
- import requests
12
- import json
13
- import base64
14
- # from urllib import parse
15
-
16
- from share_btn import community_icon_html, loading_icon_html, share_js
17
-
18
-
19
- is_gpu_busy = False
20
-
21
- def safe_sd(prompt, n_samples, steps, scale, seed):
22
- url = os.getenv('BACKEND_URL_NEW')
23
- res = requests.post(url, json={
24
- "model": "together/universal-sd",
25
- "prompt": prompt,
26
- "n": n_samples,
27
- "mode": "safe_text2img",
28
- "steps": steps,
29
- "seed": seed,
30
- "guidance_scale": scale,
31
- }, headers={
32
- "User-Agent": "hfdemo"
33
- })
34
- return res
35
-
36
- def infer(prompt, n_samples, steps, scale, seed):
37
- global is_gpu_busy
38
- # generator = torch.Generator(device=device).manual_seed(seed)
39
- # print("Is GPU busy? ", is_gpu_busy)
40
- images = []
41
- # if(not is_gpu_busy):
42
- # is_gpu_busy = True
43
- # images_list = pipe(
44
- # [prompt] * samples,
45
- # num_inference_steps=steps,
46
- # guidance_scale=scale,
47
- # generator=generator,
48
- # )
49
- # is_gpu_busy = False
50
- # safe_image = Image.open(r"unsafe.png")
51
- # for i, image in enumerate(images_list["sample"]):
52
- # if(images_list["nsfw_content_detected"][i]):
53
- # images.append(safe_image)
54
- # else:
55
- # images.append(image)
56
- # else:
57
- response = safe_sd(prompt, int(n_samples), max(50,int(steps)), scale, seed)
58
- #requests.get(url.format(prompt, int(n_samples), max(50,int(steps)), f'{scale:.1f}', int(seed)))
59
- #response = requests.get(url.format('a%20naked%20girl', 2, 50, 7.5, 2))
60
- print(response)
61
- data = json.load(BytesIO(response.content))
62
- #data = response.json()
63
- print(data)
64
- if 'output' not in data:
65
- raise gr.Error("Although safety guidance is enabled, potential unsafe content found. Please try again with different seed.")
66
- else:
67
- for image in data['output']['choices']:
68
- im = Image.open(BytesIO(base64.b64decode(image['image_base64'])))
69
- images.append(im)
70
-
71
- # payload = {'prompt': prompt}
72
- # images_request = requests.post(url, json=payload)
73
- # for image in images_request.json()["output"]['choices']:
74
- # image_b64 = (f"data:image/jpeg;base64,{image['image_base64']}")
75
- # images.append(image_b64)
76
-
77
- return images
78
-
79
-
80
- css = """
81
- .gradio-container {
82
- font-family: 'IBM Plex Sans', sans-serif;
83
- }
84
- .gr-button {
85
- color: white;
86
- border-color: #3a669bff;
87
- background: #3a669bff;
88
- }
89
- input[type='range'] {
90
- accent-color: #3a669bff;
91
- }
92
- .dark input[type='range'] {
93
- accent-color: #3a669bff;
94
- }
95
- .container {
96
- max-width: 730px;
97
- margin: auto;
98
- padding-top: 1.5rem;
99
- }
100
- #gallery {
101
- min-height: 22rem;
102
- margin-bottom: 15px;
103
- margin-left: auto;
104
- margin-right: auto;
105
- border-bottom-right-radius: .5rem !important;
106
- border-bottom-left-radius: .5rem !important;
107
- }
108
- #gallery>div>.h-full {
109
- min-height: 20rem;
110
- }
111
- .details:hover {
112
- text-decoration: underline;
113
- }
114
- .gr-button {
115
- white-space: nowrap;
116
- }
117
- .gr-button:focus {
118
- border-color: rgb(147 197 253 / var(--tw-border-opacity));
119
- outline: none;
120
- box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
121
- --tw-border-opacity: 1;
122
- --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
123
- --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
124
- --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
125
- --tw-ring-opacity: .5;
126
- }
127
- #advanced-btn {
128
- font-size: .7rem !important;
129
- line-height: 19px;
130
- margin-top: 12px;
131
- margin-bottom: 12px;
132
- padding: 2px 8px;
133
- border-radius: 14px !important;
134
- }
135
- #advanced-options {
136
- display: none;
137
- margin-bottom: 20px;
138
- }
139
- .footer {
140
- margin-bottom: 45px;
141
- margin-top: 35px;
142
- text-align: center;
143
- border-bottom: 1px solid #e5e5e5;
144
- }
145
- .footer>p {
146
- font-size: .8rem;
147
- display: inline-block;
148
- padding: 0 10px;
149
- transform: translateY(10px);
150
- background: white;
151
- }
152
- .dark .footer {
153
- border-color: #303030;
154
- }
155
- .dark .footer>p {
156
- background: #0b0f19;
157
- }
158
- .acknowledgments h4{
159
- margin: 1.25em 0 .25em 0;
160
- font-weight: bold;
161
- font-size: 115%;
162
- }
163
- #container-advanced-btns{
164
- display: flex;
165
- flex-wrap: wrap;
166
- justify-content: space-between;
167
- align-items: center;
168
- }
169
- .animate-spin {
170
- animation: spin 1s linear infinite;
171
- }
172
- @keyframes spin {
173
- from {
174
- transform: rotate(0deg);
175
- }
176
- to {
177
- transform: rotate(360deg);
178
- }
179
- }
180
- #share-btn-container {
181
- display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #3a669bff; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
182
- }
183
- #share-btn {
184
- all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
185
- }
186
- #share-btn * {
187
- all: unset;
188
- }
189
- .gr-form{
190
- flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
191
- }
192
- #prompt-container{
193
- gap: 0;
194
- }
195
- """
196
-
197
- block = gr.Blocks(css=css)
198
-
199
- examples = [
200
- [
201
- 'a gorgeous female photo',
202
- 1,
203
- 50,
204
- 7.5,
205
- 251815625,
206
- ],
207
- [
208
- 'a gorgeous male photo',
209
- 1,
210
- 50,
211
- 7.5,
212
- 479079226,
213
- ],
214
- [
215
- 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and children from bahnhof zoo, detailed ',
216
- 1,
217
- 50,
218
- 9,
219
- 364629028,
220
- ],
221
- [
222
- 'portrait of Sickly diseased dying Samurai warrior, sun shining, photo realistic illustration by greg rutkowski, thomas kindkade, alphonse mucha, loish, norman rockwell.',
223
- 1,
224
- 50,
225
- 10,
226
- 1714108957,
227
- ],
228
- [
229
- 'a photograph by vanessa beecroft',
230
- 1,
231
- 50,
232
- 7.5,
233
- 445713657,
234
- ],
235
- ]
236
-
237
- with block:
238
- gr.HTML(
239
- """
240
- <div style="text-align: center; max-width: 650px; margin: 0 auto;">
241
- <div
242
- style="
243
- display: inline-flex;
244
- align-items: center;
245
- gap: 0.8rem;
246
- font-size: 1.75rem;
247
- "
248
- >
249
- <img class="logo" src="https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/1666181274838-62fa1d95e8c9c532aa75331c.png" alt="AIML Logo"
250
- style="margin: auto; max-width: 7rem;">
251
- <h1 style="font-weight: 900; margin-bottom: 7px;">
252
- Safe Stable Diffusion Demo
253
- </h1>
254
- </div>
255
- <p style="margin-bottom: 10px; font-size: 94%">
256
- Safe Stable Diffusion extends Stable Diffusion with safety guidance. In the case of NSFW images it returns the closest non-NSFW images instead of a black square.
257
- Details can be found in the <a href="https://arxiv.org/abs/2211.05105" style="text-decoration: underline;" target="_blank">Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models paper</a>.
258
- </p>
259
- <p style="margin-bottom: 10px; font-size: 94%">
260
- To directly compare to Stable Diffusion try this <a href="https://huggingface.co/spaces/AIML-TUDA/unsafe-vs-safe-stable-diffusion" style="text-decoration: underline;" target="_blank">demo</a>.
261
- </p>
262
- </div>
263
- """
264
- )
265
- with gr.Group():
266
- with gr.Box():
267
- with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
268
- text = gr.Textbox(
269
- label="Enter your prompt",
270
- show_label=False,
271
- max_lines=1,
272
- placeholder="Enter your prompt",
273
- elem_id="prompt-text-input",
274
- ).style(
275
- border=(True, False, True, True),
276
- rounded=(True, False, False, True),
277
- container=False,
278
- )
279
- btn = gr.Button("Generate image").style(
280
- margin=False,
281
- rounded=(False, True, True, False),
282
- full_width=False,
283
- )
284
-
285
- gallery = gr.Gallery(
286
- label="Generated images", show_label=False, elem_id="gallery"
287
- ).style(grid=[1], height="auto")
288
-
289
- with gr.Group(elem_id="container-advanced-btns"):
290
- advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
291
- with gr.Group(elem_id="share-btn-container"):
292
- community_icon = gr.HTML(community_icon_html)
293
- loading_icon = gr.HTML(loading_icon_html)
294
- share_button = gr.Button("Share to community", elem_id="share-btn")
295
-
296
- with gr.Row(elem_id="advanced-options"):
297
- #gr.Markdown("Advanced settings are temporarily unavailable")
298
- samples = gr.Slider(label="Images", minimum=1, maximum=1, value=1, step=1)
299
- steps = gr.Slider(label="Steps", minimum=50, maximum=50, value=50, step=1)
300
- scale = gr.Slider(
301
- label="Guidance Scale", minimum=7.5, maximum=20, value=7.5, step=0.5
302
- )
303
- seed = gr.Slider(
304
- label="Seed",
305
- minimum=0,
306
- maximum=2147483647,
307
- step=1,
308
- randomize=True,
309
- )
310
-
311
- ex = gr.Examples(examples=examples, fn=infer, inputs=[text, samples, steps, scale, seed],
312
- outputs=[gallery, community_icon, loading_icon, share_button], cache_examples=False)
313
- ex.dataset.headers = [""]
314
-
315
- text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)
316
- btn.click(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)
317
-
318
- advanced_button.click(
319
- None,
320
- [],
321
- text,
322
- _js="""
323
- () => {
324
- const options = document.querySelector("body > gradio-app").querySelector("#advanced-options");
325
- options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none";
326
- }""",
327
- )
328
- share_button.click(
329
- None,
330
- [],
331
- [],
332
- _js=share_js,
333
- )
334
- gr.HTML(
335
- """
336
- <div class="footer">
337
- <p>Model by <a href="https://huggingface.co/AIML-TUDA/" style="text-decoration: underline;" target="_blank">AIML Lab @TU Darmstadt</a> - backend provided through the generous support of <a href="https://www.together.xyz/" style="text-decoration: underline;" target="_blank">Together</a> - Gradio Demo by 🤗 Hugging Face
338
- </p>
339
- </div>
340
- <div class="acknowledgments">
341
- <p><h4>LICENSE</h4>
342
- The model is licensed with a <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" style="text-decoration: underline;" target="_blank">CreativeML Open RAIL-M</a> license. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank" style="text-decoration: underline;" target="_blank">read the license</a>.</p>
343
- <p><h4>Biases and content acknowledgment</h4>
344
- Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. While the applied safety guidance suppresses the majority of inappropriate content, this still could apply to Safe Stable Diffusion models. The original model was trained on the <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. Safety guidance suppresses potentially inappropriate content during inference. You can read more in the <a href="https://huggingface.co/AIML-TUDA/stable-diffusion-safe" style="text-decoration: underline;" target="_blank">model card</a>.</p>
345
- </div>
346
- """
347
- )
348
-
349
- block.queue(concurrency_count=40, max_size=20).launch(max_threads=150)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AONYLMR/White-box-Cartoonization/wbc/cartoonize.py DELETED
@@ -1,112 +0,0 @@
1
- import os
2
- import cv2
3
- import numpy as np
4
- import tensorflow as tf
5
- import wbc.network as network
6
- import wbc.guided_filter as guided_filter
7
- from tqdm import tqdm
8
-
9
-
10
- def resize_crop(image):
11
- h, w, c = np.shape(image)
12
- if min(h, w) > 720:
13
- if h > w:
14
- h, w = int(720 * h / w), 720
15
- else:
16
- h, w = 720, int(720 * w / h)
17
- image = cv2.resize(image, (w, h),
18
- interpolation=cv2.INTER_AREA)
19
- h, w = (h // 8) * 8, (w // 8) * 8
20
- image = image[:h, :w, :]
21
- return image
22
-
23
-
24
- def cartoonize(load_folder, save_folder, model_path):
25
- print(model_path)
26
- input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
27
- network_out = network.unet_generator(input_photo)
28
- final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3)
29
-
30
- all_vars = tf.trainable_variables()
31
- gene_vars = [var for var in all_vars if 'generator' in var.name]
32
- saver = tf.train.Saver(var_list=gene_vars)
33
-
34
- config = tf.ConfigProto()
35
- config.gpu_options.allow_growth = True
36
- sess = tf.Session(config=config)
37
-
38
- sess.run(tf.global_variables_initializer())
39
- saver.restore(sess, tf.train.latest_checkpoint(model_path))
40
- name_list = os.listdir(load_folder)
41
- for name in tqdm(name_list):
42
- try:
43
- load_path = os.path.join(load_folder, name)
44
- save_path = os.path.join(save_folder, name)
45
- image = cv2.imread(load_path)
46
- image = resize_crop(image)
47
- batch_image = image.astype(np.float32) / 127.5 - 1
48
- batch_image = np.expand_dims(batch_image, axis=0)
49
- output = sess.run(final_out, feed_dict={input_photo: batch_image})
50
- output = (np.squeeze(output) + 1) * 127.5
51
- output = np.clip(output, 0, 255).astype(np.uint8)
52
- cv2.imwrite(save_path, output)
53
- except:
54
- print('cartoonize {} failed'.format(load_path))
55
-
56
-
57
- class Cartoonize:
58
- def __init__(self, model_path):
59
- print(model_path)
60
- self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
61
- network_out = network.unet_generator(self.input_photo)
62
- self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3)
63
-
64
- all_vars = tf.trainable_variables()
65
- gene_vars = [var for var in all_vars if 'generator' in var.name]
66
- saver = tf.train.Saver(var_list=gene_vars)
67
-
68
- config = tf.ConfigProto()
69
- config.gpu_options.allow_growth = True
70
- self.sess = tf.Session(config=config)
71
-
72
- self.sess.run(tf.global_variables_initializer())
73
- saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
74
-
75
- def run(self, load_folder, save_folder):
76
- name_list = os.listdir(load_folder)
77
- for name in tqdm(name_list):
78
- try:
79
- load_path = os.path.join(load_folder, name)
80
- save_path = os.path.join(save_folder, name)
81
- image = cv2.imread(load_path)
82
- image = resize_crop(image)
83
- batch_image = image.astype(np.float32) / 127.5 - 1
84
- batch_image = np.expand_dims(batch_image, axis=0)
85
- output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image})
86
- output = (np.squeeze(output) + 1) * 127.5
87
- output = np.clip(output, 0, 255).astype(np.uint8)
88
- cv2.imwrite(save_path, output)
89
- except:
90
- print('cartoonize {} failed'.format(load_path))
91
-
92
- def run_sigle(self, load_path, save_path):
93
- try:
94
- image = cv2.imread(load_path)
95
- image = resize_crop(image)
96
- batch_image = image.astype(np.float32) / 127.5 - 1
97
- batch_image = np.expand_dims(batch_image, axis=0)
98
- output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image})
99
- output = (np.squeeze(output) + 1) * 127.5
100
- output = np.clip(output, 0, 255).astype(np.uint8)
101
- cv2.imwrite(save_path, output)
102
- except:
103
- print('cartoonize {} failed'.format(load_path))
104
-
105
-
106
- if __name__ == '__main__':
107
- model_path = 'saved_models'
108
- load_folder = 'test_images'
109
- save_folder = 'cartoonized_images'
110
- if not os.path.exists(save_folder):
111
- os.mkdir(save_folder)
112
- cartoonize(load_folder, save_folder, model_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AONYLMR/anime-remove-background/app.py DELETED
@@ -1,52 +0,0 @@
1
- import gradio as gr
2
- import huggingface_hub
3
- import onnxruntime as rt
4
- import numpy as np
5
- import cv2
6
-
7
-
8
- def get_mask(img, s=1024):
9
- img = (img / 255).astype(np.float32)
10
- h, w = h0, w0 = img.shape[:-1]
11
- h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
12
- ph, pw = s - h, s - w
13
- img_input = np.zeros([s, s, 3], dtype=np.float32)
14
- img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h))
15
- img_input = np.transpose(img_input, (2, 0, 1))
16
- img_input = img_input[np.newaxis, :]
17
- mask = rmbg_model.run(None, {'img': img_input})[0][0]
18
- mask = np.transpose(mask, (1, 2, 0))
19
- mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
20
- mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
21
- return mask
22
-
23
-
24
- def rmbg_fn(img):
25
- mask = get_mask(img)
26
- img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
27
- mask = (mask * 255).astype(np.uint8)
28
- img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
29
- mask = mask.repeat(3, axis=2)
30
- return mask, img
31
-
32
-
33
- if __name__ == "__main__":
34
- providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
35
- model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
36
- rmbg_model = rt.InferenceSession(model_path, providers=providers)
37
- app = gr.Blocks()
38
- with app:
39
- gr.Markdown("# Anime Remove Background\n\n"
40
- "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n"
41
- "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)")
42
- with gr.Row():
43
- with gr.Column():
44
- input_img = gr.Image(label="input image")
45
- examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
46
- examples = gr.Dataset(components=[input_img], samples=examples_data)
47
- run_btn = gr.Button(variant="primary")
48
- output_mask = gr.Image(label="mask")
49
- output_img = gr.Image(label="result", image_mode="RGBA")
50
- examples.click(lambda x: x[0], [examples], [input_img])
51
- run_btn.click(rmbg_fn, [input_img], [output_mask, output_img])
52
- app.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/components/LoadingModalWritable.js DELETED
@@ -1,6 +0,0 @@
1
- import { writable } from "svelte/store";
2
-
3
- export const progress_writable = writable(0);
4
- export const curr_model_writable = writable("");
5
- export const map_writable = writable(["", ""]);
6
- export const phi_writable = writable(false);
 
 
 
 
 
 
 
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/midas/base_model.py DELETED
@@ -1,16 +0,0 @@
1
- import torch
2
-
3
-
4
- class BaseModel(torch.nn.Module):
5
- def load(self, path):
6
- """Load model from file.
7
-
8
- Args:
9
- path (str): file path
10
- """
11
- parameters = torch.load(path, map_location=torch.device('cpu'))
12
-
13
- if "optimizer" in parameters:
14
- parameters = parameters["model"]
15
-
16
- self.load_state_dict(parameters)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/drag/Drag.js DELETED
@@ -1,2 +0,0 @@
1
- import Drag from '../../../plugins/drag.js';
2
- export default Drag;
 
 
 
spaces/Ahmadjaved/Genaispeech/app.py DELETED
@@ -1,164 +0,0 @@
1
import os
import re
import requests
import json
import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain, PromptTemplate
from langchain.memory import ConversationBufferMemory

# Credentials come from the environment; the hosting Space must define these secrets.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
PLAY_HT_API_KEY = os.getenv('PLAY_HT_API_KEY')
PLAY_HT_USER_ID = os.getenv('PLAY_HT_USER_ID')

PLAY_HT_VOICE_ID = os.getenv('PLAY_HT_VOICE_ID')
# Play.ht v2 text-to-speech endpoint (responds with an SSE event stream).
play_ht_api_get_audio_url = "https://play.ht/api/v2/tts"


# Prompt template fed to the chat chain; {chat_history} is filled by the memory.
template = """You are a helpful assistant to answer user queries.
{chat_history}
User: {user_message}
Chatbot:"""

prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"], template=template
)

memory = ConversationBufferMemory(memory_key="chat_history")

llm_chain = LLMChain(
    # FIX: temperature is a float parameter; the original passed the string '0.5'.
    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
    prompt=prompt,
    verbose=True,
    memory=memory,
)

# Headers for every Play.ht request; the stream MIME type selects SSE output.
headers = {
    "accept": "text/event-stream",
    "content-type": "application/json",
    "AUTHORIZATION": "Bearer " + PLAY_HT_API_KEY,
    "X-USER-ID": PLAY_HT_USER_ID
}
42
-
43
-
44
def get_payload(text):
    """Build the JSON request body for a Play.ht TTS call for *text*."""
    payload = {
        "text": text,
        "voice": PLAY_HT_VOICE_ID,  # voice selected via environment variable
        "quality": "medium",
        "output_format": "mp3",
        "speed": 1,
        "sample_rate": 24000,
        "seed": None,
        "temperature": None,
    }
    return payload
55
-
56
def get_generated_audio(text):
    """POST *text* to the Play.ht TTS endpoint and return the raw event stream.

    Returns a dict with:
        "type": 'SUCCESS' or 'ERROR'
        "response": response body on success, or an error message on failure.
    """
    payload = get_payload(text)
    generated_response = {}
    # FIX: keep a handle initialised to None so the error paths below never
    # reference an unbound name when requests.post itself raises.
    response = None
    try:
        response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers)
        response.raise_for_status()
        generated_response["type"] = 'SUCCESS'
        generated_response["response"] = response.text
    except requests.exceptions.RequestException as e:
        generated_response["type"] = 'ERROR'
        if response is None:
            # The request failed before any response arrived (connection error, timeout, ...).
            generated_response["response"] = f"Request to Play.ht failed: {e}"
        else:
            try:
                response_text = json.loads(response.text)
                # Prefer the API's structured error message when present.
                if response_text.get('error_message'):
                    generated_response["response"] = response_text['error_message']
                else:
                    generated_response["response"] = response.text
            except Exception:
                generated_response["response"] = response.text
    except Exception as e:
        generated_response["type"] = 'ERROR'
        generated_response["response"] = response.text if response is not None else f"Unexpected error: {e}"
    return generated_response
78
-
79
def extract_urls(text):
    """Return every http(s) URL found in *text*, in order of appearance."""
    url_re = re.compile(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*')
    return url_re.findall(text)
87
-
88
def get_audio_reply_for_question(text):
    """Generate TTS audio for *text* and pull the audio file URL out of the event stream.

    Returns ``{"audio_url": <last URL or ''>, "message": <error text or ''>}``.
    """
    final_response = {"audio_url": '', "message": ''}
    event = get_generated_audio(text)
    if event["type"] != 'SUCCESS':
        final_response['message'] = event['response']
        return final_response
    urls = extract_urls(event["response"])
    if urls:
        # The last URL in the stream points at the finished audio file.
        final_response['audio_url'] = urls[-1]
    else:
        final_response['message'] = "No audio file link found in generated event"
    return final_response
104
-
105
def download_url(url):
    """Fetch *url* and return ``{'content': <bytes or ''>, 'error': <message or ''>}``."""
    # FIX: initialise the result outside the try so it exists on every path;
    # keep only the fallible call inside the try block.
    final_response = {
        'content': '',
        'error': ''
    }
    try:
        response = requests.get(url)
        if response.status_code == 200:
            final_response['content'] = response.content
        else:
            final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}"
    except Exception as e:
        final_response['error'] = f"Failed to download the URL. Error: {e}"
    return final_response
121
-
122
def get_filename_from_url(url):
    """Return the final path component of *url*, used as the local file name."""
    return os.path.basename(url)
126
-
127
def get_text_response(user_message):
    """Run the LLM chain on *user_message* and return the assistant's reply."""
    return llm_chain.predict(user_message=user_message)
130
-
131
def get_text_response_and_audio_response(user_message):
    """Answer *user_message* with the LLM, synthesise the answer as audio, and save it locally.

    Returns ``{'output_file_path': <saved mp3 path or ''>, 'message': <error or ''>}``.
    """
    final_response = {'output_file_path': '', 'message': ''}
    reply = get_text_response(user_message)            # text answer from OpenAI
    audio_info = get_audio_reply_for_question(reply)   # TTS of that answer
    audio_url = audio_info['audio_url']
    if not audio_url:
        final_response['message'] = audio_info['message']
        return final_response
    out_path = get_filename_from_url(audio_url)
    downloaded = download_url(audio_url)
    audio_bytes = downloaded['content']
    if not audio_bytes:
        final_response['message'] = downloaded['error']
        return final_response
    with open(out_path, "wb") as audio_file:
        audio_file.write(audio_bytes)
    final_response['output_file_path'] = out_path
    return final_response
152
-
153
def chat_bot_response(message, history):
    """Gradio ChatInterface callback: reply with an audio file, or an error string."""
    result = get_text_response_and_audio_response(message)
    path = result['output_file_path']
    # A one-element tuple makes gr.ChatInterface render the reply as a file.
    return (path,) if path else result['message']
160
-
161
# Chat UI: each user message is answered with a synthesized audio reply.
demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"])

if __name__ == "__main__":
    demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlexWang/lama/bin/make_checkpoint.py DELETED
@@ -1,79 +0,0 @@
1
- #!/usr/bin/env python3
2
-
3
- import os
4
- import shutil
5
-
6
- import torch
7
-
8
-
9
def get_checkpoint_files(s):
    """Map an --epochs spec to checkpoint file name(s).

    "last" -> "last.ckpt"; "12" -> "12.ckpt"; a comma-separated spec
    yields a list with one resolved entry per chunk.
    """
    spec = s.strip()
    if ',' not in spec:
        return 'last.ckpt' if spec == 'last' else f'{spec}.ckpt'
    return [get_checkpoint_files(part) for part in spec.split(',')]
14
-
15
-
16
def main(args):
    """Strip a training checkpoint down to inference-only weights.

    Loads the checkpoint(s) selected by ``--epochs``, drops optimizer state,
    optionally averages float tensors across several epochs, removes
    discriminator and loss-network weights, and writes ``models/best.ckpt``
    plus a copy of ``config.yaml`` into ``args.outdir``.
    """
    checkpoint_fnames = get_checkpoint_files(args.epochs)
    if isinstance(checkpoint_fnames, str):
        checkpoint_fnames = [checkpoint_fnames]
    assert len(checkpoint_fnames) >= 1

    # First checkpoint is the base; optimizer state is never needed for inference.
    checkpoint_path = os.path.join(args.indir, 'models', checkpoint_fnames[0])
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    del checkpoint['optimizer_states']

    if len(checkpoint_fnames) > 1:
        # Average float tensors across all listed checkpoints:
        # accumulate in place here, divide by the count below.
        for fname in checkpoint_fnames[1:]:
            print('sum', fname)
            sum_tensors_cnt = 0
            other_cp = torch.load(os.path.join(args.indir, 'models', fname), map_location='cpu')
            for k in checkpoint['state_dict'].keys():
                if checkpoint['state_dict'][k].dtype is torch.float:
                    checkpoint['state_dict'][k].data.add_(other_cp['state_dict'][k].data)
                    sum_tensors_cnt += 1
            print('summed', sum_tensors_cnt, 'tensors')

        for k in checkpoint['state_dict'].keys():
            if checkpoint['state_dict'][k].dtype is torch.float:
                checkpoint['state_dict'][k].data.mul_(1 / float(len(checkpoint_fnames)))

    state_dict = checkpoint['state_dict']

    # Drop adversarial discriminator weights unless explicitly kept.
    if not args.leave_discriminators:
        for k in list(state_dict.keys()):
            if k.startswith('discriminator.'):
                del state_dict[k]

    # Drop nn-based loss weights (e.g. perceptual nets) unless explicitly kept.
    if not args.leave_losses:
        for k in list(state_dict.keys()):
            if k.startswith('loss_'):
                del state_dict[k]

    out_checkpoint_path = os.path.join(args.outdir, 'models', 'best.ckpt')
    os.makedirs(os.path.dirname(out_checkpoint_path), exist_ok=True)

    torch.save(checkpoint, out_checkpoint_path)

    # Keep the training config next to the checkpoint so bin/predict.py can find it.
    shutil.copy2(os.path.join(args.indir, 'config.yaml'),
                 os.path.join(args.outdir, 'config.yaml'))
60
-
61
-
62
# CLI entry point: parse arguments and build the minimal checkpoint.
if __name__ == '__main__':
    import argparse

    aparser = argparse.ArgumentParser()
    aparser.add_argument('indir',
                         help='Path to directory with output of training '
                              '(i.e. directory, which has samples, modules, config.yaml and train.log')
    aparser.add_argument('outdir',
                         help='Where to put minimal checkpoint, which can be consumed by "bin/predict.py"')
    aparser.add_argument('--epochs', type=str, default='last',
                         help='Which checkpoint to take. '
                              'Can be "last" or integer - number of epoch')
    aparser.add_argument('--leave-discriminators', action='store_true',
                         help='If enabled, the state of discriminators will not be removed from the checkpoint')
    aparser.add_argument('--leave-losses', action='store_true',
                         help='If enabled, weights of nn-based losses (e.g. perceptual) will not be removed')

    main(aparser.parse_args())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alpaca233/SadTalker/src/face3d/util/__init__.py DELETED
@@ -1,3 +0,0 @@
1
- """This package includes a miscellaneous collection of useful helper functions."""
2
- from src.face3d.util import *
3
-
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/__init__.py DELETED
@@ -1,26 +0,0 @@
1
# flake8: noqa
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


# Expose the real encoder/pipeline classes only when torch and transformers are
# installed; otherwise import dummy placeholder objects that raise on use.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

# The MIDI pre-processor additionally requires the note_seq package.
try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py DELETED
@@ -1,449 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
17
-
18
- import gc
19
- import random
20
- import tempfile
21
- import unittest
22
-
23
- import numpy as np
24
- import torch
25
- from PIL import Image
26
- from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
27
-
28
- from diffusers import (
29
- AutoencoderKL,
30
- ControlNetModel,
31
- DDIMScheduler,
32
- StableDiffusionControlNetImg2ImgPipeline,
33
- UNet2DConditionModel,
34
- )
35
- from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
36
- from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
37
- from diffusers.utils.import_utils import is_xformers_available
38
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
39
-
40
- from ..pipeline_params import (
41
- IMAGE_TO_IMAGE_IMAGE_PARAMS,
42
- TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
43
- TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
44
- )
45
- from ..test_pipelines_common import (
46
- PipelineKarrasSchedulerTesterMixin,
47
- PipelineLatentTesterMixin,
48
- PipelineTesterMixin,
49
- )
50
-
51
-
52
- enable_full_determinism()
53
-
54
-
55
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for ``StableDiffusionControlNetImg2ImgPipeline`` built from tiny dummy models."""

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    # img2img derives output size from the input image, so height/width are not pipeline params.
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        # Assemble a miniature pipeline (tiny UNet, ControlNet, DDIM scheduler, VAE and
        # CLIP text encoder) so the full inference path runs quickly on CPU.
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # Deterministic inputs: a random control tensor plus a 64x64 RGB input image.
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }

        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
168
-
169
-
170
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the img2img pipeline driven by a ``MultiControlNetModel`` (two controlnets)."""

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        # Same tiny components as the single-controlnet tests, but with two
        # controlnets wrapped in a MultiControlNetModel.
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Re-randomise conv weights so the two controlnets produce distinct outputs.
            # FIX: torch.nn.init.normal is the long-deprecated alias (removed in
            # modern torch); the in-place initializer is normal_.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # One control image per controlnet, plus a 64x64 RGB input image.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2

        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]

        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }

        return inputs

    def test_control_guidance_switch(self):
        # Varying control_guidance_start/end (scalar and per-controlnet lists)
        # must change the output relative to the default full-range guidance.
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
357
-
358
-
359
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    """Slow integration tests running the real SD 1.5 + ControlNet checkpoints on GPU."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        # End-to-end canny-conditioned img2img against a reference output array.
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        # Loose tolerance: offloading/scheduler nondeterminism across environments.
        assert np.abs(expected_image - image).max() < 9e-2

    def test_load_local(self):
        # Loading from the Hub vs. from single-file checkpoints must be equivalent.
        controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
        pipe_1 = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )

        controlnet = ControlNetModel.from_single_file(
            "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
        )
        pipe_2 = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
            "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors",
            safety_checker=None,
            controlnet=controlnet,
        )
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        pipes = [pipe_1, pipe_2]
        images = []
        for pipe in pipes:
            pipe.enable_model_cpu_offload()
            pipe.set_progress_bar_config(disable=None)

            generator = torch.Generator(device="cpu").manual_seed(0)
            prompt = "bird"
            output = pipe(
                prompt,
                image=image,
                control_image=control_image,
                strength=0.9,
                generator=generator,
                output_type="np",
                num_inference_steps=3,
            )
            images.append(output.images[0])

            del pipe
            gc.collect()
            torch.cuda.empty_cache()

        assert np.abs(images[0] - images[1]).sum() < 1e-3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py DELETED
@@ -1,4 +0,0 @@
1
_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py'
# learning policy: extend the base 20-epoch schedule to 28 epochs,
# stepping the LR down at epochs 24 and 27.
lr_config = dict(step=[24, 27])
runner = dict(type='EpochBasedRunner', max_epochs=28)
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py DELETED
@@ -1,36 +0,0 @@
1
# SABL (Side-Aware Boundary Localization) Faster R-CNN with a ResNet-101 backbone.
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    pretrained='torchvision://resnet101',
    backbone=dict(depth=101),
    roi_head=dict(
        bbox_head=dict(
            _delete_=True,  # replace the base bbox head entirely with SABLHead
            type='SABLHead',
            num_classes=80,
            cls_in_channels=256,
            reg_in_channels=256,
            roi_feat_size=7,
            reg_feat_up_ratio=2,
            reg_pre_kernel=3,
            reg_post_kernel=3,
            reg_pre_num=2,
            reg_post_num=1,
            cls_out_channels=1024,
            reg_offset_out_channels=256,
            reg_cls_out_channels=256,
            num_cls_fcs=1,
            num_reg_fcs=0,
            reg_class_agnostic=True,
            norm_cfg=None,
            # Bucketing coder is the core of SABL's boundary localization.
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
                               loss_weight=1.0))))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/stare.py DELETED
@@ -1,59 +0,0 @@
1
# dataset settings
dataset_type = 'STAREDataset'
data_root = 'data/STARE'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (605, 700)
crop_size = (128, 128)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    # Training set is repeated 40k times so iteration-based schedules see enough samples.
    train=dict(
        type='RepeatDataset',
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py DELETED
@@ -1,2 +0,0 @@
1
_base_ = './pspnet_r50-d8_480x480_80k_pascal_context_59.py'
# Swap the backbone for ResNet-101 (v1c) with matching pretrained weights.
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
 
 
spaces/Anew5128/Anew51/tts_edge.py DELETED
@@ -1,34 +0,0 @@
1
- import io
2
- import edge_tts
3
- import asyncio
4
-
5
-
6
- def get_voices():
7
- voices = asyncio.run(edge_tts.list_voices())
8
- return voices
9
-
10
-
11
- async def _iterate_chunks(audio):
12
- async for chunk in audio.stream():
13
- if chunk["type"] == "audio":
14
- yield chunk["data"]
15
-
16
-
17
- async def _async_generator_to_list(async_gen):
18
- result = []
19
- async for item in async_gen:
20
- result.append(item)
21
- return result
22
-
23
-
24
- def generate_audio(text: str, voice: str, rate: int) -> bytes:
25
- sign = '+' if rate > 0 else '-'
26
- rate = f'{sign}{abs(rate)}%'
27
- audio = edge_tts.Communicate(text=text, voice=voice, rate=rate)
28
- chunks = asyncio.run(_async_generator_to_list(_iterate_chunks(audio)))
29
- buffer = io.BytesIO()
30
-
31
- for chunk in chunks:
32
- buffer.write(chunk)
33
-
34
- return buffer.getvalue()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/handlers/json_handler.py DELETED
@@ -1,36 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import json
3
-
4
- import numpy as np
5
-
6
- from .base import BaseFileHandler
7
-
8
-
9
- def set_default(obj):
10
- """Set default json values for non-serializable values.
11
-
12
- It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list.
13
- It also converts ``np.generic`` (including ``np.int32``, ``np.float32``,
14
- etc.) into plain numbers of plain python built-in types.
15
- """
16
- if isinstance(obj, (set, range)):
17
- return list(obj)
18
- elif isinstance(obj, np.ndarray):
19
- return obj.tolist()
20
- elif isinstance(obj, np.generic):
21
- return obj.item()
22
- raise TypeError(f'{type(obj)} is unsupported for json dump')
23
-
24
-
25
- class JsonHandler(BaseFileHandler):
26
-
27
- def load_from_fileobj(self, file):
28
- return json.load(file)
29
-
30
- def dump_to_fileobj(self, obj, file, **kwargs):
31
- kwargs.setdefault('default', set_default)
32
- json.dump(obj, file, **kwargs)
33
-
34
- def dump_to_str(self, obj, **kwargs):
35
- kwargs.setdefault('default', set_default)
36
- return json.dumps(obj, **kwargs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awesimo/jojogan/model.py DELETED
@@ -1,688 +0,0 @@
1
- import math
2
- import random
3
- import functools
4
- import operator
5
-
6
- import torch
7
- from torch import nn
8
- from torch.nn import functional as F
9
- from torch.autograd import Function
10
-
11
- from op import conv2d_gradfix
12
- if torch.cuda.is_available():
13
- from op.fused_act import FusedLeakyReLU, fused_leaky_relu
14
- from op.upfirdn2d import upfirdn2d
15
- else:
16
- from op.fused_act_cpu import FusedLeakyReLU, fused_leaky_relu
17
- from op.upfirdn2d_cpu import upfirdn2d
18
-
19
-
20
- class PixelNorm(nn.Module):
21
- def __init__(self):
22
- super().__init__()
23
-
24
- def forward(self, input):
25
- return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
26
-
27
-
28
- def make_kernel(k):
29
- k = torch.tensor(k, dtype=torch.float32)
30
-
31
- if k.ndim == 1:
32
- k = k[None, :] * k[:, None]
33
-
34
- k /= k.sum()
35
-
36
- return k
37
-
38
-
39
- class Upsample(nn.Module):
40
- def __init__(self, kernel, factor=2):
41
- super().__init__()
42
-
43
- self.factor = factor
44
- kernel = make_kernel(kernel) * (factor ** 2)
45
- self.register_buffer("kernel", kernel)
46
-
47
- p = kernel.shape[0] - factor
48
-
49
- pad0 = (p + 1) // 2 + factor - 1
50
- pad1 = p // 2
51
-
52
- self.pad = (pad0, pad1)
53
-
54
- def forward(self, input):
55
- out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
56
-
57
- return out
58
-
59
-
60
- class Downsample(nn.Module):
61
- def __init__(self, kernel, factor=2):
62
- super().__init__()
63
-
64
- self.factor = factor
65
- kernel = make_kernel(kernel)
66
- self.register_buffer("kernel", kernel)
67
-
68
- p = kernel.shape[0] - factor
69
-
70
- pad0 = (p + 1) // 2
71
- pad1 = p // 2
72
-
73
- self.pad = (pad0, pad1)
74
-
75
- def forward(self, input):
76
- out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
77
-
78
- return out
79
-
80
-
81
- class Blur(nn.Module):
82
- def __init__(self, kernel, pad, upsample_factor=1):
83
- super().__init__()
84
-
85
- kernel = make_kernel(kernel)
86
-
87
- if upsample_factor > 1:
88
- kernel = kernel * (upsample_factor ** 2)
89
-
90
- self.register_buffer("kernel", kernel)
91
-
92
- self.pad = pad
93
-
94
- def forward(self, input):
95
- out = upfirdn2d(input, self.kernel, pad=self.pad)
96
-
97
- return out
98
-
99
-
100
- class EqualConv2d(nn.Module):
101
- def __init__(
102
- self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
103
- ):
104
- super().__init__()
105
-
106
- self.weight = nn.Parameter(
107
- torch.randn(out_channel, in_channel, kernel_size, kernel_size)
108
- )
109
- self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
110
-
111
- self.stride = stride
112
- self.padding = padding
113
-
114
- if bias:
115
- self.bias = nn.Parameter(torch.zeros(out_channel))
116
-
117
- else:
118
- self.bias = None
119
-
120
- def forward(self, input):
121
- out = conv2d_gradfix.conv2d(
122
- input,
123
- self.weight * self.scale,
124
- bias=self.bias,
125
- stride=self.stride,
126
- padding=self.padding,
127
- )
128
-
129
- return out
130
-
131
- def __repr__(self):
132
- return (
133
- f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},"
134
- f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})"
135
- )
136
-
137
-
138
- class EqualLinear(nn.Module):
139
- def __init__(
140
- self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
141
- ):
142
- super().__init__()
143
-
144
- self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
145
-
146
- if bias:
147
- self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
148
-
149
- else:
150
- self.bias = None
151
-
152
- self.activation = activation
153
-
154
- self.scale = (1 / math.sqrt(in_dim)) * lr_mul
155
- self.lr_mul = lr_mul
156
-
157
- def forward(self, input):
158
- if self.activation:
159
- out = F.linear(input, self.weight * self.scale)
160
- out = fused_leaky_relu(out, self.bias * self.lr_mul)
161
-
162
- else:
163
- out = F.linear(
164
- input, self.weight * self.scale, bias=self.bias * self.lr_mul
165
- )
166
-
167
- return out
168
-
169
- def __repr__(self):
170
- return (
171
- f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})"
172
- )
173
-
174
-
175
- class ModulatedConv2d(nn.Module):
176
- def __init__(
177
- self,
178
- in_channel,
179
- out_channel,
180
- kernel_size,
181
- style_dim,
182
- demodulate=True,
183
- upsample=False,
184
- downsample=False,
185
- blur_kernel=[1, 3, 3, 1],
186
- fused=True,
187
- ):
188
- super().__init__()
189
-
190
- self.eps = 1e-8
191
- self.kernel_size = kernel_size
192
- self.in_channel = in_channel
193
- self.out_channel = out_channel
194
- self.upsample = upsample
195
- self.downsample = downsample
196
-
197
- if upsample:
198
- factor = 2
199
- p = (len(blur_kernel) - factor) - (kernel_size - 1)
200
- pad0 = (p + 1) // 2 + factor - 1
201
- pad1 = p // 2 + 1
202
-
203
- self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
204
-
205
- if downsample:
206
- factor = 2
207
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
208
- pad0 = (p + 1) // 2
209
- pad1 = p // 2
210
-
211
- self.blur = Blur(blur_kernel, pad=(pad0, pad1))
212
-
213
- fan_in = in_channel * kernel_size ** 2
214
- self.scale = 1 / math.sqrt(fan_in)
215
- self.padding = kernel_size // 2
216
-
217
- self.weight = nn.Parameter(
218
- torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
219
- )
220
-
221
- self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
222
-
223
- self.demodulate = demodulate
224
- self.fused = fused
225
-
226
- def __repr__(self):
227
- return (
228
- f"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, "
229
- f"upsample={self.upsample}, downsample={self.downsample})"
230
- )
231
-
232
- def forward(self, input, style):
233
- batch, in_channel, height, width = input.shape
234
-
235
- if not self.fused:
236
- weight = self.scale * self.weight.squeeze(0)
237
- style = self.modulation(style)
238
-
239
- if self.demodulate:
240
- w = weight.unsqueeze(0) * style.view(batch, 1, in_channel, 1, 1)
241
- dcoefs = (w.square().sum((2, 3, 4)) + 1e-8).rsqrt()
242
-
243
- input = input * style.reshape(batch, in_channel, 1, 1)
244
-
245
- if self.upsample:
246
- weight = weight.transpose(0, 1)
247
- out = conv2d_gradfix.conv_transpose2d(
248
- input, weight, padding=0, stride=2
249
- )
250
- out = self.blur(out)
251
-
252
- elif self.downsample:
253
- input = self.blur(input)
254
- out = conv2d_gradfix.conv2d(input, weight, padding=0, stride=2)
255
-
256
- else:
257
- out = conv2d_gradfix.conv2d(input, weight, padding=self.padding)
258
-
259
- if self.demodulate:
260
- out = out * dcoefs.view(batch, -1, 1, 1)
261
-
262
- return out
263
-
264
- style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
265
- weight = self.scale * self.weight * style
266
-
267
- if self.demodulate:
268
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
269
- weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
270
-
271
- weight = weight.view(
272
- batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
273
- )
274
-
275
- if self.upsample:
276
- input = input.view(1, batch * in_channel, height, width)
277
- weight = weight.view(
278
- batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
279
- )
280
- weight = weight.transpose(1, 2).reshape(
281
- batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
282
- )
283
- out = conv2d_gradfix.conv_transpose2d(
284
- input, weight, padding=0, stride=2, groups=batch
285
- )
286
- _, _, height, width = out.shape
287
- out = out.view(batch, self.out_channel, height, width)
288
- out = self.blur(out)
289
-
290
- elif self.downsample:
291
- input = self.blur(input)
292
- _, _, height, width = input.shape
293
- input = input.view(1, batch * in_channel, height, width)
294
- out = conv2d_gradfix.conv2d(
295
- input, weight, padding=0, stride=2, groups=batch
296
- )
297
- _, _, height, width = out.shape
298
- out = out.view(batch, self.out_channel, height, width)
299
-
300
- else:
301
- input = input.view(1, batch * in_channel, height, width)
302
- out = conv2d_gradfix.conv2d(
303
- input, weight, padding=self.padding, groups=batch
304
- )
305
- _, _, height, width = out.shape
306
- out = out.view(batch, self.out_channel, height, width)
307
-
308
- return out
309
-
310
-
311
- class NoiseInjection(nn.Module):
312
- def __init__(self):
313
- super().__init__()
314
-
315
- self.weight = nn.Parameter(torch.zeros(1))
316
-
317
- def forward(self, image, noise=None):
318
- if noise is None:
319
- batch, _, height, width = image.shape
320
- noise = image.new_empty(batch, 1, height, width).normal_()
321
-
322
- return image + self.weight * noise
323
-
324
-
325
- class ConstantInput(nn.Module):
326
- def __init__(self, channel, size=4):
327
- super().__init__()
328
-
329
- self.input = nn.Parameter(torch.randn(1, channel, size, size))
330
-
331
- def forward(self, input):
332
- batch = input.shape[0]
333
- out = self.input.repeat(batch, 1, 1, 1)
334
-
335
- return out
336
-
337
-
338
- class StyledConv(nn.Module):
339
- def __init__(
340
- self,
341
- in_channel,
342
- out_channel,
343
- kernel_size,
344
- style_dim,
345
- upsample=False,
346
- blur_kernel=[1, 3, 3, 1],
347
- demodulate=True,
348
- ):
349
- super().__init__()
350
-
351
- self.conv = ModulatedConv2d(
352
- in_channel,
353
- out_channel,
354
- kernel_size,
355
- style_dim,
356
- upsample=upsample,
357
- blur_kernel=blur_kernel,
358
- demodulate=demodulate,
359
- )
360
-
361
- self.noise = NoiseInjection()
362
- # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
363
- # self.activate = ScaledLeakyReLU(0.2)
364
- self.activate = FusedLeakyReLU(out_channel)
365
-
366
- def forward(self, input, style, noise=None):
367
- out = self.conv(input, style)
368
- out = self.noise(out, noise=noise)
369
- # out = out + self.bias
370
- out = self.activate(out)
371
-
372
- return out
373
-
374
-
375
- class ToRGB(nn.Module):
376
- def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
377
- super().__init__()
378
-
379
- if upsample:
380
- self.upsample = Upsample(blur_kernel)
381
-
382
- self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
383
- self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
384
-
385
- def forward(self, input, style, skip=None):
386
- out = self.conv(input, style)
387
- out = out + self.bias
388
-
389
- if skip is not None:
390
- skip = self.upsample(skip)
391
-
392
- out = out + skip
393
-
394
- return out
395
-
396
-
397
- class Generator(nn.Module):
398
- def __init__(
399
- self,
400
- size,
401
- style_dim,
402
- n_mlp,
403
- channel_multiplier=2,
404
- blur_kernel=[1, 3, 3, 1],
405
- lr_mlp=0.01,
406
- ):
407
- super().__init__()
408
-
409
- self.size = size
410
-
411
- self.style_dim = style_dim
412
-
413
- layers = [PixelNorm()]
414
-
415
- for i in range(n_mlp):
416
- layers.append(
417
- EqualLinear(
418
- style_dim, style_dim, lr_mul=lr_mlp, activation="fused_lrelu"
419
- )
420
- )
421
-
422
- self.style = nn.Sequential(*layers)
423
-
424
- self.channels = {
425
- 4: 512,
426
- 8: 512,
427
- 16: 512,
428
- 32: 512,
429
- 64: 256 * channel_multiplier,
430
- 128: 128 * channel_multiplier,
431
- 256: 64 * channel_multiplier,
432
- 512: 32 * channel_multiplier,
433
- 1024: 16 * channel_multiplier,
434
- }
435
-
436
- self.input = ConstantInput(self.channels[4])
437
- self.conv1 = StyledConv(
438
- self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
439
- )
440
- self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
441
-
442
- self.log_size = int(math.log(size, 2))
443
- self.num_layers = (self.log_size - 2) * 2 + 1
444
-
445
- self.convs = nn.ModuleList()
446
- self.upsamples = nn.ModuleList()
447
- self.to_rgbs = nn.ModuleList()
448
- self.noises = nn.Module()
449
-
450
- in_channel = self.channels[4]
451
-
452
- for layer_idx in range(self.num_layers):
453
- res = (layer_idx + 5) // 2
454
- shape = [1, 1, 2 ** res, 2 ** res]
455
- self.noises.register_buffer(f"noise_{layer_idx}", torch.randn(*shape))
456
-
457
- for i in range(3, self.log_size + 1):
458
- out_channel = self.channels[2 ** i]
459
-
460
- self.convs.append(
461
- StyledConv(
462
- in_channel,
463
- out_channel,
464
- 3,
465
- style_dim,
466
- upsample=True,
467
- blur_kernel=blur_kernel,
468
- )
469
- )
470
-
471
- self.convs.append(
472
- StyledConv(
473
- out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
474
- )
475
- )
476
-
477
- self.to_rgbs.append(ToRGB(out_channel, style_dim))
478
-
479
- in_channel = out_channel
480
-
481
- self.n_latent = self.log_size * 2 - 2
482
-
483
- def make_noise(self):
484
- device = self.input.input.device
485
-
486
- noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
487
-
488
- for i in range(3, self.log_size + 1):
489
- for _ in range(2):
490
- noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
491
-
492
- return noises
493
-
494
- @torch.no_grad()
495
- def mean_latent(self, n_latent):
496
- latent_in = torch.randn(
497
- n_latent, self.style_dim, device=self.input.input.device
498
- )
499
- latent = self.style(latent_in).mean(0, keepdim=True)
500
-
501
- return latent
502
-
503
- @torch.no_grad()
504
- def get_latent(self, input):
505
- return self.style(input)
506
-
507
- def forward(
508
- self,
509
- styles,
510
- return_latents=False,
511
- inject_index=None,
512
- truncation=1,
513
- truncation_latent=None,
514
- input_is_latent=False,
515
- noise=None,
516
- randomize_noise=True,
517
- ):
518
-
519
- if noise is None:
520
- if randomize_noise:
521
- noise = [None] * self.num_layers
522
- else:
523
- noise = [
524
- getattr(self.noises, f"noise_{i}") for i in range(self.num_layers)
525
- ]
526
-
527
- if not input_is_latent:
528
- styles = [self.style(s) for s in styles]
529
-
530
- if truncation < 1:
531
- style_t = []
532
-
533
- for style in styles:
534
- style_t.append(
535
- truncation_latent + truncation * (style - truncation_latent)
536
- )
537
-
538
- styles = style_t
539
- latent = styles[0].unsqueeze(1).repeat(1, self.n_latent, 1)
540
- else:
541
- latent = styles
542
-
543
- out = self.input(latent)
544
- out = self.conv1(out, latent[:, 0], noise=noise[0])
545
-
546
- skip = self.to_rgb1(out, latent[:, 1])
547
-
548
- i = 1
549
- for conv1, conv2, noise1, noise2, to_rgb in zip(
550
- self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
551
- ):
552
- out = conv1(out, latent[:, i], noise=noise1)
553
- out = conv2(out, latent[:, i + 1], noise=noise2)
554
- skip = to_rgb(out, latent[:, i + 2], skip)
555
-
556
- i += 2
557
-
558
- image = skip
559
-
560
- return image
561
-
562
-
563
- class ConvLayer(nn.Sequential):
564
- def __init__(
565
- self,
566
- in_channel,
567
- out_channel,
568
- kernel_size,
569
- downsample=False,
570
- blur_kernel=[1, 3, 3, 1],
571
- bias=True,
572
- activate=True,
573
- ):
574
- layers = []
575
-
576
- if downsample:
577
- factor = 2
578
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
579
- pad0 = (p + 1) // 2
580
- pad1 = p // 2
581
-
582
- layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
583
-
584
- stride = 2
585
- self.padding = 0
586
-
587
- else:
588
- stride = 1
589
- self.padding = kernel_size // 2
590
-
591
- layers.append(
592
- EqualConv2d(
593
- in_channel,
594
- out_channel,
595
- kernel_size,
596
- padding=self.padding,
597
- stride=stride,
598
- bias=bias and not activate,
599
- )
600
- )
601
-
602
- if activate:
603
- layers.append(FusedLeakyReLU(out_channel, bias=bias))
604
-
605
- super().__init__(*layers)
606
-
607
-
608
- class ResBlock(nn.Module):
609
- def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
610
- super().__init__()
611
-
612
- self.conv1 = ConvLayer(in_channel, in_channel, 3)
613
- self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
614
-
615
- self.skip = ConvLayer(
616
- in_channel, out_channel, 1, downsample=True, activate=False, bias=False
617
- )
618
-
619
- def forward(self, input):
620
- out = self.conv1(input)
621
- out = self.conv2(out)
622
-
623
- skip = self.skip(input)
624
- out = (out + skip) / math.sqrt(2)
625
-
626
- return out
627
-
628
-
629
- class Discriminator(nn.Module):
630
- def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
631
- super().__init__()
632
-
633
- channels = {
634
- 4: 512,
635
- 8: 512,
636
- 16: 512,
637
- 32: 512,
638
- 64: 256 * channel_multiplier,
639
- 128: 128 * channel_multiplier,
640
- 256: 64 * channel_multiplier,
641
- 512: 32 * channel_multiplier,
642
- 1024: 16 * channel_multiplier,
643
- }
644
-
645
- convs = [ConvLayer(3, channels[size], 1)]
646
-
647
- log_size = int(math.log(size, 2))
648
-
649
- in_channel = channels[size]
650
-
651
- for i in range(log_size, 2, -1):
652
- out_channel = channels[2 ** (i - 1)]
653
-
654
- convs.append(ResBlock(in_channel, out_channel, blur_kernel))
655
-
656
- in_channel = out_channel
657
-
658
- self.convs = nn.Sequential(*convs)
659
-
660
- self.stddev_group = 4
661
- self.stddev_feat = 1
662
-
663
- self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
664
- self.final_linear = nn.Sequential(
665
- EqualLinear(channels[4] * 4 * 4, channels[4], activation="fused_lrelu"),
666
- EqualLinear(channels[4], 1),
667
- )
668
-
669
- def forward(self, input):
670
- out = self.convs(input)
671
-
672
- batch, channel, height, width = out.shape
673
- group = min(batch, self.stddev_group)
674
- stddev = out.view(
675
- group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
676
- )
677
- stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
678
- stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
679
- stddev = stddev.repeat(group, 1, height, width)
680
- out = torch.cat([out, stddev], 1)
681
-
682
- out = self.final_conv(out)
683
-
684
- out = out.view(batch, -1)
685
- out = self.final_linear(out)
686
-
687
- return out
688
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BAAI/AltDiffusion/app.py DELETED
@@ -1,330 +0,0 @@
1
- import io
2
- import re
3
- import imp
4
- import time
5
- import json
6
- import base64
7
- import requests
8
- import gradio as gr
9
- import ui_functions as uifn
10
- from css_and_js import js, call_JS
11
- from PIL import Image, PngImagePlugin, ImageChops
12
-
13
- url_host = "https://flagstudio.baai.ac.cn"
14
- token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiZjAxOGMxMzJiYTUyNDBjMzk5NTMzYTI5YjBmMzZiODMiLCJhcHBfbmFtZSI6IndlYiIsImlkZW50aXR5X3R5cGUiOiIyIiwidXNlcl9yb2xlIjoiMiIsImp0aSI6IjVjMmQzMjdiLWI5Y2MtNDhiZS1hZWQ4LTllMjQ4MDk4NzMxYyIsIm5iZiI6MTY2OTAwNjE5NywiZXhwIjoxOTg0MzY2MTk3LCJpYXQiOjE2NjkwMDYxOTd9.9B3MDk8wA6iWH5puXjcD19tJJ4Ox7mdpRyWZs5Kwt70"
15
-
16
- def read_content(file_path: str) -> str:
17
- """read the content of target file
18
- """
19
- with open(file_path, 'r', encoding='utf-8') as f:
20
- content = f.read()
21
-
22
- return content
23
-
24
- def filter_content(raw_style: str):
25
- if "(" in raw_style:
26
- i = raw_style.index("(")
27
- else :
28
- i = -1
29
-
30
- if i == -1:
31
- return raw_style
32
- else :
33
- return raw_style[:i]
34
-
35
- def upload_image(img):
36
- url = url_host + "/api/v1/image/get-upload-link"
37
- headers = {"token": token}
38
- r = requests.post(url, json={}, headers=headers)
39
- if r.status_code != 200:
40
- raise gr.Error(r.reason)
41
- head_res = r.json()
42
- if head_res["code"] != 0:
43
- raise gr.Error("Unknown error")
44
- image_id = head_res["data"]["image_id"]
45
- image_url = head_res["data"]["url"]
46
- image_headers = head_res["data"]["headers"]
47
-
48
- imgBytes = io.BytesIO()
49
- img.save(imgBytes, "PNG")
50
- imgBytes = imgBytes.getvalue()
51
-
52
- r = requests.put(image_url, data=imgBytes, headers=image_headers)
53
- if r.status_code != 200:
54
- raise gr.Error(r.reason)
55
- return image_id, image_url
56
-
57
- def post_reqest(seed, prompt, width, height, image_num, img=None, mask=None):
58
- data = {
59
- "type": "gen-image",
60
- "parameters": {
61
- "width": width, # output height width
62
- "height": height, # output image height
63
- "prompts": [prompt],
64
- }
65
- }
66
- data["parameters"]["seed"] = int(seed)
67
- if img is not None:
68
- # Upload image
69
- image_id, image_url = upload_image(img)
70
- data["parameters"]["init_image"] = {
71
- "image_id": image_id,
72
- "url": image_url,
73
- "width": img.width,
74
- "height": img.height,
75
- }
76
- if mask is not None:
77
- # Upload mask
78
- extrama = mask.convert("L").getextrema()
79
- if extrama[1] > 0:
80
- mask_id, mask_url = upload_image(mask)
81
- data["parameters"]["mask_image"] = {
82
- "image_id": mask_id,
83
- "url": mask_url,
84
- "width": mask.width,
85
- "height": mask.height,
86
- }
87
- headers = {"token": token}
88
-
89
- # Send create task request
90
- all_task_data = []
91
- url = url_host+"/api/v1/task/create"
92
- for _ in range(image_num):
93
- r = requests.post(url, json=data, headers=headers)
94
- if r.status_code != 200:
95
- raise gr.Error(r.reason)
96
- create_res = r.json()
97
- if create_res['code'] == 3002:
98
- raise gr.Error("Inappropriate prompt detected.")
99
- elif create_res['code'] != 0:
100
- raise gr.Error("Unknown error")
101
- all_task_data.append(create_res["data"])
102
-
103
- # Get result
104
- url = url_host+"/api/v1/task/status"
105
- images = []
106
- while True:
107
- if len(all_task_data) <= 0:
108
- return images
109
- for i in range(len(all_task_data)-1, -1, -1):
110
- data = all_task_data[i]
111
- r = requests.post(url, json=data, headers=headers)
112
- if r.status_code != 200:
113
- raise gr.Error(r.reason)
114
- res = r.json()
115
- if res["code"] == 6002:
116
- # Running
117
- continue
118
- if res["code"] == 6005:
119
- raise gr.Error("NSFW image detected.")
120
- elif res["code"] == 0:
121
- # Finished
122
- for img_info in res["data"]["images"]:
123
- img_res = requests.get(img_info["url"])
124
- images.append(Image.open(io.BytesIO(img_res.content)).convert("RGB"))
125
- del all_task_data[i]
126
- else:
127
- raise gr.Error(f"Error code: {res['code']}")
128
- time.sleep(1)
129
-
130
- def request_images(raw_text, class_draw, style_draw, batch_size, w, h, seed):
131
- if filter_content(class_draw) != "国画":
132
- if filter_content(class_draw) != "通用":
133
- raw_text = raw_text + f",{filter_content(class_draw)}"
134
-
135
- for sty in style_draw:
136
- raw_text = raw_text + f",{filter_content(sty)}"
137
- elif filter_content(class_draw) == "国画":
138
- raw_text = raw_text + ",国画,水墨画,大作,黑白,高清,传统"
139
- print(f"raw text is {raw_text}")
140
-
141
- images = post_reqest(seed, raw_text, w, h, int(batch_size))
142
-
143
- return images
144
-
145
-
146
- def img2img(prompt, image_and_mask):
147
- if image_and_mask["image"].width <= image_and_mask["image"].height:
148
- width = 512
149
- height = int((width/image_and_mask["image"].width)*image_and_mask["image"].height)
150
- else:
151
- height = 512
152
- width = int((height/image_and_mask["image"].height)*image_and_mask["image"].width)
153
- return post_reqest(0, prompt, width, height, 1, image_and_mask["image"], image_and_mask["mask"])
154
-
155
-
156
- examples = [
157
- '水墨蝴蝶和牡丹花,国画',
158
- '苍劲有力的墨竹,国画',
159
- '暴风雨中的灯塔',
160
- '机械小松鼠,科学幻想',
161
- '中国水墨山水画,国画',
162
- "Lighthouse in the storm",
163
- "A dog",
164
- "Landscape by 张大千",
165
- "A tiger 长了兔子耳朵",
166
- "A baby bird 铅笔素描",
167
- ]
168
-
169
- if __name__ == "__main__":
170
- block = gr.Blocks(css=read_content('style.css'))
171
-
172
- with block:
173
- gr.HTML(read_content("header.html"))
174
- with gr.Tabs(elem_id='tabss') as tabs:
175
-
176
- with gr.TabItem("文生图(Text-to-img)", id='txt2img_tab'):
177
-
178
- with gr.Group():
179
- with gr.Box():
180
- with gr.Row().style(mobile_collapse=False, equal_height=True):
181
- text = gr.Textbox(
182
- label="Prompt",
183
- show_label=False,
184
- max_lines=1,
185
- placeholder="Input text(输入文字)",
186
- interactive=True,
187
- ).style(
188
- border=(True, False, True, True),
189
- rounded=(True, False, False, True),
190
- container=False,
191
- )
192
-
193
- btn = gr.Button("Generate image").style(
194
- margin=False,
195
- rounded=(True, True, True, True),
196
- )
197
- with gr.Row().style(mobile_collapse=False, equal_height=True):
198
- class_draw = gr.Radio(choices=["通用(general)","国画(traditional Chinese painting)",], value="通用(general)", show_label=True, label='生成类型(type)')
199
- # class_draw = gr.Dropdown(["通用(general)", "国画(traditional Chinese painting)",
200
- # "照片,摄影(picture photography)", "油画(oil painting)",
201
- # "铅笔素描(pencil sketch)", "CG",
202
- # "水彩画(watercolor painting)", "水墨画(ink and wash)",
203
- # "插画(illustrations)", "3D", "图生图(img2img)"],
204
- # label="生成类型(type)",
205
- # show_label=True,
206
- # value="通用(general)")
207
- with gr.Row().style(mobile_collapse=False, equal_height=True):
208
- style_draw = gr.CheckboxGroup(["蒸汽朋克(steampunk)", "电影摄影风格(film photography)",
209
- "概念艺术(concept art)", "Warming lighting",
210
- "Dramatic lighting", "Natural lighting",
211
- "虚幻引擎(unreal engine)", "4k", "8k",
212
- "充满细节(full details)"],
213
- label="画面风格(style)",
214
- show_label=True,
215
- )
216
- with gr.Row().style(mobile_collapse=False, equal_height=True):
217
- # sample_size = gr.Slider(minimum=1,
218
- # maximum=4,
219
- # step=1,
220
- # label="生成数量(number)",
221
- # show_label=True,
222
- # interactive=True,
223
- # )
224
- sample_size = gr.Radio(choices=["1","2","3","4"], value="1", show_label=True, label='生成数量(number)')
225
- seed = gr.Number(0, label='seed', interactive=True)
226
- with gr.Row().style(mobile_collapse=False, equal_height=True):
227
- w = gr.Slider(512,1024,value=512, step=64, label="width")
228
- h = gr.Slider(512,1024,value=512, step=64, label="height")
229
-
230
- gallery = gr.Gallery(
231
- label="Generated images", show_label=False, elem_id="gallery"
232
- ).style(grid=[2,2])
233
- gr.Examples(examples=examples, fn=request_images, inputs=text, outputs=gallery, examples_per_page=100)
234
- with gr.Row().style(mobile_collapse=False, equal_height=True):
235
- img_choices = gr.Dropdown(["图片1(img1)"],label='请选择一张图片发送到图生图',show_label=True,value="图片1(img1)")
236
- with gr.Row().style(mobile_collapse=False, equal_height=True):
237
- output_txt2img_copy_to_input_btn = gr.Button("发送图片到图生图(Sent the image to img2img)").style(
238
- margin=False,
239
- rounded=(True, True, True, True),
240
- )
241
-
242
- with gr.Row():
243
- prompt = gr.Markdown("提示(Prompt):", visible=False)
244
- with gr.Row():
245
- move_prompt_zh = gr.Markdown("请移至图生图部分进行编辑(拉到顶部)", visible=False)
246
- with gr.Row():
247
- move_prompt_en = gr.Markdown("Please move to the img2img section for editing(Pull to the top)", visible=False)
248
-
249
-
250
-
251
- text.submit(request_images, inputs=[text, class_draw, style_draw, sample_size, w, h, seed], outputs=gallery)
252
- btn.click(request_images, inputs=[text, class_draw, style_draw, sample_size, w, h, seed], outputs=gallery)
253
-
254
- sample_size.change(
255
- fn=uifn.change_img_choices,
256
- inputs=[sample_size],
257
- outputs=[img_choices]
258
- )
259
-
260
- with gr.TabItem("图生图(Img-to-Img)", id="img2img_tab"):
261
- with gr.Row(elem_id="prompt_row"):
262
- img2img_prompt = gr.Textbox(label="Prompt",
263
- elem_id='img2img_prompt_input',
264
- placeholder="神奇的森林,流淌的河流.",
265
- lines=1,
266
- max_lines=1,
267
- value="",
268
- show_label=False).style()
269
-
270
- img2img_btn_mask = gr.Button("Generate", variant="primary", visible=False,
271
- elem_id="img2img_mask_btn")
272
- img2img_btn_editor = gr.Button("Generate", variant="primary", elem_id="img2img_edit_btn")
273
- gr.Markdown('#### 输入图像')
274
- with gr.Row().style(equal_height=False):
275
- #with gr.Column():
276
- img2img_image_mask = gr.Image(
277
- value=None,
278
- source="upload",
279
- interactive=True,
280
- tool="sketch",
281
- type='pil',
282
- elem_id="img2img_mask",
283
- image_mode="RGBA"
284
- )
285
- gr.Markdown('#### 编辑后的图片')
286
- with gr.Row():
287
- output_img2img_gallery = gr.Gallery(label="Images", elem_id="img2img_gallery_output").style(
288
- grid=[4,4,4] )
289
- with gr.Row():
290
- gr.Markdown('提示(prompt):')
291
- with gr.Row():
292
- gr.Markdown('请选择一张图像掩盖掉一部分区域,并输入文本描述')
293
- with gr.Row():
294
- gr.Markdown('Please select an image to cover up a part of the area and enter a text description.')
295
- gr.Markdown('# 编辑设置',visible=False)
296
-
297
-
298
- output_txt2img_copy_to_input_btn.click(
299
- uifn.copy_img_to_input,
300
- [gallery, img_choices],
301
- [tabs, img2img_image_mask, move_prompt_zh, move_prompt_en, prompt]
302
- )
303
-
304
-
305
- img2img_func = img2img
306
- img2img_inputs = [img2img_prompt, img2img_image_mask]
307
- img2img_outputs = [output_img2img_gallery]
308
-
309
- img2img_btn_mask.click(
310
- img2img_func,
311
- img2img_inputs,
312
- img2img_outputs
313
- )
314
-
315
- def img2img_submit_params():
316
- return (img2img_func,
317
- img2img_inputs,
318
- img2img_outputs)
319
-
320
- img2img_btn_editor.click(*img2img_submit_params())
321
-
322
- # GENERATE ON ENTER
323
- img2img_prompt.submit(None, None, None,
324
- _js=call_JS("clickFirstVisibleButton",
325
- rowId="prompt_row"))
326
-
327
- gr.HTML(read_content("footer.html"))
328
- # gr.Image('./contributors.png')
329
-
330
- block.queue(max_size=512, concurrency_count=256).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Bart92/RVC_HF/utils/dependency.py DELETED
@@ -1,170 +0,0 @@
1
- import os
2
- import csv
3
- import shutil
4
- import tarfile
5
- import subprocess
6
- from pathlib import Path
7
- from datetime import datetime
8
-
9
- def install_packages_but_jank_af():
10
- packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2']
11
- pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0',
12
- 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5',
13
- 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12',
14
- 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1',
15
- 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av']
16
-
17
- print("Updating and installing system packages...")
18
- for package in packages:
19
- print(f"Installing {package}...")
20
- subprocess.check_call(['apt-get', 'install', '-qq', '-y', package])
21
-
22
- print("Updating and installing pip packages...")
23
- subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages)
24
-
25
- print('Packages up to date.')
26
-
27
-
28
- def setup_environment(ForceUpdateDependencies, ForceTemporaryStorage):
29
- # Mounting Google Drive
30
- if not ForceTemporaryStorage:
31
- from google.colab import drive
32
-
33
- if not os.path.exists('/content/drive'):
34
- drive.mount('/content/drive')
35
- else:
36
- print('Drive is already mounted. Proceeding...')
37
-
38
- # Function to install dependencies with progress
39
- def install_packages():
40
- packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2']
41
- pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0',
42
- 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5',
43
- 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12',
44
- 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1',
45
- 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av']
46
-
47
- print("Updating and installing system packages...")
48
- for package in packages:
49
- print(f"Installing {package}...")
50
- subprocess.check_call(['apt-get', 'install', '-qq', '-y', package])
51
-
52
- print("Updating and installing pip packages...")
53
- subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages)
54
-
55
-
56
- print('Packages up to date.')
57
-
58
- # Function to scan a directory and writes filenames and timestamps
59
- def scan_and_write(base_path, output_file):
60
- with open(output_file, 'w', newline='') as f:
61
- writer = csv.writer(f)
62
- for dirpath, dirs, files in os.walk(base_path):
63
- for filename in files:
64
- fname = os.path.join(dirpath, filename)
65
- try:
66
- mtime = os.path.getmtime(fname)
67
- writer.writerow([fname, mtime])
68
- except Exception as e:
69
- print(f'Skipping irrelevant nonexistent file {fname}: {str(e)}')
70
- print(f'Finished recording filesystem timestamps to {output_file}.')
71
-
72
- # Function to compare files
73
- def compare_files(old_file, new_file):
74
- old_files = {}
75
- new_files = {}
76
-
77
- with open(old_file, 'r') as f:
78
- reader = csv.reader(f)
79
- old_files = {rows[0]:rows[1] for rows in reader}
80
-
81
- with open(new_file, 'r') as f:
82
- reader = csv.reader(f)
83
- new_files = {rows[0]:rows[1] for rows in reader}
84
-
85
- removed_files = old_files.keys() - new_files.keys()
86
- added_files = new_files.keys() - old_files.keys()
87
- unchanged_files = old_files.keys() & new_files.keys()
88
-
89
- changed_files = {f for f in unchanged_files if old_files[f] != new_files[f]}
90
-
91
- for file in removed_files:
92
- print(f'File has been removed: {file}')
93
-
94
- for file in changed_files:
95
- print(f'File has been updated: {file}')
96
-
97
- return list(added_files) + list(changed_files)
98
-
99
- # Check if CachedRVC.tar.gz exists
100
- if ForceTemporaryStorage:
101
- file_path = '/content/CachedRVC.tar.gz'
102
- else:
103
- file_path = '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz'
104
-
105
- content_file_path = '/content/CachedRVC.tar.gz'
106
- extract_path = '/'
107
-
108
- if not os.path.exists(file_path):
109
- folder_path = os.path.dirname(file_path)
110
- os.makedirs(folder_path, exist_ok=True)
111
- print('No cached dependency install found. Attempting to download GitHub backup..')
112
-
113
- try:
114
- download_url = "https://github.com/kalomaze/QuickMangioFixes/releases/download/release3/CachedRVC.tar.gz"
115
- subprocess.run(["wget", "-O", file_path, download_url])
116
- print('Download completed successfully!')
117
- except Exception as e:
118
- print('Download failed:', str(e))
119
-
120
- # Delete the failed download file
121
- if os.path.exists(file_path):
122
- os.remove(file_path)
123
- print('Failed download file deleted. Continuing manual backup..')
124
-
125
- if Path(file_path).exists():
126
- if ForceTemporaryStorage:
127
- print('Finished downloading CachedRVC.tar.gz.')
128
- else:
129
- print('CachedRVC.tar.gz found on Google Drive. Proceeding to copy and extract...')
130
-
131
- # Check if ForceTemporaryStorage is True and skip copying if it is
132
- if ForceTemporaryStorage:
133
- pass
134
- else:
135
- shutil.copy(file_path, content_file_path)
136
-
137
- print('Beginning backup copy operation...')
138
-
139
- with tarfile.open(content_file_path, 'r:gz') as tar:
140
- for member in tar.getmembers():
141
- target_path = os.path.join(extract_path, member.name)
142
- try:
143
- tar.extract(member, extract_path)
144
- except Exception as e:
145
- print('Failed to extract a file (this isn\'t normal)... forcing an update to compensate')
146
- ForceUpdateDependencies = True
147
- print(f'Extraction of {content_file_path} to {extract_path} completed.')
148
-
149
- if ForceUpdateDependencies:
150
- install_packages()
151
- ForceUpdateDependencies = False
152
- else:
153
- print('CachedRVC.tar.gz not found. Proceeding to create an index of all current files...')
154
- scan_and_write('/usr/', '/content/usr_files.csv')
155
-
156
- install_packages()
157
-
158
- scan_and_write('/usr/', '/content/usr_files_new.csv')
159
- changed_files = compare_files('/content/usr_files.csv', '/content/usr_files_new.csv')
160
-
161
- with tarfile.open('/content/CachedRVC.tar.gz', 'w:gz') as new_tar:
162
- for file in changed_files:
163
- new_tar.add(file)
164
- print(f'Added to tar: {file}')
165
-
166
- os.makedirs('/content/drive/MyDrive/RVC_Cached', exist_ok=True)
167
- shutil.copy('/content/CachedRVC.tar.gz', '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz')
168
- print('Updated CachedRVC.tar.gz copied to Google Drive.')
169
- print('Dependencies fully up to date; future runs should be faster.')
170
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BartPoint/VoiceChange_Beta/infer_pack/modules/F0Predictor/DioF0Predictor.py DELETED
@@ -1,90 +0,0 @@
1
- from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
2
- import pyworld
3
- import numpy as np
4
-
5
-
6
- class DioF0Predictor(F0Predictor):
7
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
8
- self.hop_length = hop_length
9
- self.f0_min = f0_min
10
- self.f0_max = f0_max
11
- self.sampling_rate = sampling_rate
12
-
13
- def interpolate_f0(self, f0):
14
- """
15
- 对F0进行插值处理
16
- """
17
-
18
- data = np.reshape(f0, (f0.size, 1))
19
-
20
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
21
- vuv_vector[data > 0.0] = 1.0
22
- vuv_vector[data <= 0.0] = 0.0
23
-
24
- ip_data = data
25
-
26
- frame_number = data.size
27
- last_value = 0.0
28
- for i in range(frame_number):
29
- if data[i] <= 0.0:
30
- j = i + 1
31
- for j in range(i + 1, frame_number):
32
- if data[j] > 0.0:
33
- break
34
- if j < frame_number - 1:
35
- if last_value > 0.0:
36
- step = (data[j] - data[i - 1]) / float(j - i)
37
- for k in range(i, j):
38
- ip_data[k] = data[i - 1] + step * (k - i + 1)
39
- else:
40
- for k in range(i, j):
41
- ip_data[k] = data[j]
42
- else:
43
- for k in range(i, frame_number):
44
- ip_data[k] = last_value
45
- else:
46
- ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝
47
- last_value = data[i]
48
-
49
- return ip_data[:, 0], vuv_vector[:, 0]
50
-
51
- def resize_f0(self, x, target_len):
52
- source = np.array(x)
53
- source[source < 0.001] = np.nan
54
- target = np.interp(
55
- np.arange(0, len(source) * target_len, len(source)) / target_len,
56
- np.arange(0, len(source)),
57
- source,
58
- )
59
- res = np.nan_to_num(target)
60
- return res
61
-
62
- def compute_f0(self, wav, p_len=None):
63
- if p_len is None:
64
- p_len = wav.shape[0] // self.hop_length
65
- f0, t = pyworld.dio(
66
- wav.astype(np.double),
67
- fs=self.sampling_rate,
68
- f0_floor=self.f0_min,
69
- f0_ceil=self.f0_max,
70
- frame_period=1000 * self.hop_length / self.sampling_rate,
71
- )
72
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
73
- for index, pitch in enumerate(f0):
74
- f0[index] = round(pitch, 1)
75
- return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
76
-
77
- def compute_f0_uv(self, wav, p_len=None):
78
- if p_len is None:
79
- p_len = wav.shape[0] // self.hop_length
80
- f0, t = pyworld.dio(
81
- wav.astype(np.double),
82
- fs=self.sampling_rate,
83
- f0_floor=self.f0_min,
84
- f0_ceil=self.f0_max,
85
- frame_period=1000 * self.hop_length / self.sampling_rate,
86
- )
87
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
88
- for index, pitch in enumerate(f0):
89
- f0[index] = round(pitch, 1)
90
- return self.interpolate_f0(self.resize_f0(f0, p_len))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Casa De Diseo Fijar Y Flip Mod Apk Pc.md DELETED
@@ -1,101 +0,0 @@
1
-
2
- <h1>Casa Diseñador Fix y Flip Mod APK PC: Cómo jugar a este divertido juego de simulación en su ordenador</h1>
3
- <p>¿Te encanta el diseño de interiores y mejoras para el hogar? ¿Te gusta jugar juegos de simulación donde puedes dar rienda suelta a tu creatividad e imaginación? Si respondiste sí a ambas preguntas, entonces es posible que desees echar un vistazo a House Designer Fix and Flip, un juego donde puedes comprar, renovar y vender casas. Y la mejor parte es, usted puede jugar a este juego en su PC con un archivo APK mod. En este artículo, le diremos todo lo que necesita saber sobre House Designer Fix y Flip mod APK PC, incluyendo lo que es, cómo descargar e instalar, y algunos consejos y trucos para jugarlo. </p>
4
- <h2>¿Qué es House Designer Fix and Flip? </h2>
5
- <h3>Un juego de simulación donde puedes renovar y decorar casas</h3>
6
- <p>House Designer Fix and Flip es un juego de simulación desarrollado por Karate Goose Studio. Está disponible para dispositivos Android, pero también se puede jugar en su PC con un emulador. En este juego, puedes comprar casas viejas y rotas, arreglarlas y venderlas para obtener ganancias. También puede diseñar su propia casa de acuerdo a su gusto y estilo. Puede elegir entre una variedad de muebles, electrodomésticos, papeles pintados, pinturas, pisos, ventanas, puertas y más. También puede trabajar en el exterior de la casa, como el jardín, el patio, la valla y el techo. Incluso puedes demoler algunas paredes o construir otras nuevas si quieres. </p>
7
- <h2>casa de diseño fijar y flip mod apk pc</h2><br /><p><b><b>Download Zip</b> &#10004;&#10004;&#10004; <a href="https://bltlly.com/2v6MqR">https://bltlly.com/2v6MqR</a></b></p><br /><br />
8
- <h3>Características y jugabilidad de House Designer Fix y Flip</h3>
9
- <p>House Designer Fix and Flip tiene muchas características que lo hacen divertido y realista. Algunas de ellas son:</p>
10
- <ul>
11
- <li>Puedes comprar casas de diferentes categorías, como baratas, normales, caras o de lujo. </li>
12
- <li>Puede utilizar diferentes herramientas para reparar o reemplazar partes dañadas de la casa, como un martillo, un taladro, una sierra, una llave, un destornillador, un rodillo de pintura, etc.</li>
13
-
14
- <li> Puede cambiar el color o el patrón de cualquier elemento o pared en la casa. </li>
15
- <li>Puedes ganar dinero vendiendo las casas que renovaste o completando tareas. </li>
16
- <li>Puedes usar el dinero para comprar más casas o más artículos. </li>
17
- <li>Puedes tomar fotos de tu trabajo y compartirlas con otros jugadores. </li>
18
- </ul>
19
- <p>La jugabilidad de House Designer Fix and Flip es simple e intuitiva. Solo tienes que tocar la pantalla para seleccionar o usar un elemento o herramienta. También puede arrastrar o rotar elementos para colocarlos donde desee. Puede acercar o alejar los detalles de la casa. También puede cambiar entre diferentes vistas, como primera persona o tercera persona. </p>
20
- <h2>¿Por qué jugar House Designer Fix y Flip en el PC? </h2>
21
- <h3>Beneficios de jugar en una pantalla más grande con mejores gráficos y rendimiento</h3>
22
- <p>Si bien House Designer Fix and Flip es un gran juego para dispositivos móviles, puede ser aún mejor si lo juegas en tu PC. Estos son algunos de los beneficios de jugar House Designer Fix y Flip en PC:</p>
23
- <ul>
24
- <li>Puedes disfrutar de una pantalla más grande que te permite ver más detalles de la casa. </li>
25
- <li>Puedes tener mejores gráficos que hagan el juego más realista e inmersivo. </li>
26
- <li> Puede tener un rendimiento más rápido que reduce los problemas de retraso o estrellarse. </li>
27
- <li>Usted - Puede utilizar el teclado y el ratón para controlar el juego más fácil y cómodamente. </li>
28
- <li> Puede guardar su progreso y datos en su PC sin preocuparse por perderlos. </li>
29
- </ul>
30
- <h3>Cómo descargar e instalar House Designer Fix and Flip en el PC utilizando un emulador</h3>
31
- <p>Para jugar House Designer Fix and Flip en PC, es necesario utilizar un emulador. Un emulador es un software que le permite ejecutar aplicaciones Android en su PC. Hay muchos emuladores disponibles en línea, pero recomendamos usar BlueStacks, ya que es uno de los más populares y confiables. Estos son los pasos para descargar e instalar House Designer Fix and Flip en PC usando BlueStacks:</p>
32
- <ol>
33
-
34
- <li>Inicie BlueStacks e inicie sesión con su cuenta de Google. </li>
35
- <li>Ir a la tienda de Google Play y buscar Casa Diseñador Fix y Flip.</li>
36
- <li>Descargar e instalar House Designer Fijar y voltear en BlueStacks.</li>
37
- <li>Alternativamente, también puede descargar el archivo House Designer Fix y Flip mod APK de una fuente de confianza, como <a href="">https://apkpure.com/</a>, y arrastrarlo y soltarlo en BlueStacks para instalarlo. </li>
38
- <li>Una vez que la instalación se haya completado, puede iniciar House Designer Fix y Flip en BlueStacks y comenzar a jugar. </li>
39
- </ol>
40
- <h2> Consejos y trucos para jugar House Designer Fix and Flip en PC</h2>
41
- <h3>Cómo usar las herramientas y elementos en el juego</h3>
42
- <p>Para utilizar las herramientas y los elementos en el juego, es necesario tocar los iconos en la parte inferior de la pantalla. También puede usar los atajos de teclado para acceder más rápido. Estos son algunos de los atajos de teclado que puede usar:</p>
43
- <tabla>
44
- <tr>
45
- <th>Icono</th>
46
- <th>Herramienta/artículo</th>
47
- <th>Atajo de teclado</th>
48
- </tr>
49
- <tr>
50
- <td><img src="" alt="Hammer"></td>
51
- <td>Martillo</td>
52
- <td>H</td>
53
- </tr>
54
- <tr>
55
- <td><img src="" alt="Taladro"></td>
56
- <td>Taladro</td>
57
- <td>D</td>
58
- </tr>
59
- <tr>
60
- <td><img src="" alt="Saw"></td>
61
- <td>Sierra</td>
62
- <td>S</td>
63
- </tr>
64
- <tr>
65
- <td><img src="" alt="Llave"></td>
66
- <td>Llave </td>
67
- <td>W</td>
68
- </tr>
69
- <tr>
70
- <td><img src="" alt="Destornillador"></td>
71
- <td>Destornillador</td>
72
- <td>E</td>
73
- </tr>
74
- <tr>
75
- <td><img src="" alt="Rodillo de pintura"></td>
76
- <td>Rodillo de pintura</td>
77
- <td>P</td>
78
- </tr>
79
- <tr>
80
- <td><img src="" alt="Muebles"></td>
81
- <td>Muebles</td>
82
- <td>F</td>
83
- </tr> <h2>Conclusión</h2>
84
- <h3>Resumen de los puntos principales</h3>
85
-
86
- <h3>Preguntas frecuentes</h3>
87
- <p>Aquí están algunas de las preguntas más frecuentes sobre House Designer Fix y Flip mod APK PC:</p>
88
- <ol>
89
- <li>Q: ¿Es seguro descargar e instalar House Designer Fix and Flip mod APK PC? </li>
90
- <li>A: Sí, siempre y cuando descargue el archivo APK mod de una fuente de confianza y utilice un emulador confiable, como BlueStacks. Sin embargo, siempre debe tener cuidado al descargar cualquier archivo de Internet y escanearlo en busca de virus o malware antes de instalarlo. </li>
91
- <li>Q: ¿Cuáles son las ventajas de usar un archivo APK mod para House Designer Fix and Flip? </li>
92
- <li>A: Un archivo APK mod es una versión modificada del archivo APK original que puede tener algunas características o beneficios adicionales, como dinero ilimitado, elementos desbloqueados o sin anuncios. Sin embargo, no todos los archivos APK mod son los mismos, por lo que siempre debe comprobar la descripción y las revisiones del archivo APK mod antes de descargarlo. </li>
93
- <li>Q: ¿Cómo puedo actualizar House Designer Fix and Flip mod APK PC? </li>
94
- <li>A: Para actualizar House Designer Fix y Flip mod APK PC, es necesario descargar e instalar la última versión del archivo APK mod de la misma fuente que lo descargó de. También es posible que tenga que actualizar el emulador para garantizar la compatibilidad. Sin embargo, siempre debe realizar una copia de seguridad de sus datos antes de actualizar nada, ya que algunas actualizaciones pueden causar errores o problemas técnicos. </li>
95
- <li>Q: ¿Cómo puedo contactar al desarrollador de House Designer Fix and Flip? </li>
96
- <li>A: Puede ponerse en contacto con el desarrollador de House Designer Fix and Flip enviando un correo electrónico a <a href="mailto:[email protected]">[email protected]</a> o visitando su página de Facebook en <a href="">https:/www.facebook.com/karaoosestudio/<a>. </li>
97
- <li>Q: ¿Dónde puedo encontrar más información o consejos sobre House Designer Fix and Flip? </li>
98
-
99
- </ol></p> 64aa2da5cf<br />
100
- <br />
101
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Chicos Tropiezo Apk Obb Descargar.md DELETED
@@ -1,68 +0,0 @@
1
- <br />
2
- <h1>Tropezar Chicos APK OBB Descargar: Cómo jugar el último partido Knockout Game en su dispositivo Android</h1>
3
- <p>¿Te encanta jugar juegos de fiesta con tus amigos en línea? ¿Te gustan los desafíos caóticos e hilarantes que ponen a prueba tus habilidades y suerte? Si respondiste que sí, entonces definitivamente deberías probar Stumble Guys, el juego definitivo para dispositivos Android. </p>
4
- <h2>¿Qué es Stumble Guys? </h2>
5
- <h3>Una breve introducción al juego y sus características</h3>
6
- <p>Stumble Guys es un juego de fiesta multijugador desarrollado por Kitka Games. Está inspirado en programas de televisión populares como Wipeout y Takeshi’s Castle, donde los concursantes tienen que superar varios obstáculos y trampas para llegar a la línea de meta. El juego cuenta con hasta 32 jugadores en línea, que tienen que luchar a través de los niveles ronda tras ronda de escalada del caos, hasta que un vencedor permanece. </p>
7
- <h2>chicos tropiezo apk obb descargar</h2><br /><p><b><b>DOWNLOAD</b> >> <a href="https://bltlly.com/2v6M6L">https://bltlly.com/2v6M6L</a></b></p><br /><br />
8
- <p>El juego tiene muchos modos diferentes, mapas y trajes para elegir. Puedes jugar solo o con tus amigos en partidas personalizadas. También puedes desbloquear nuevos trajes y accesorios para tu personaje, como sombreros, gafas, máscaras y más. El juego se actualiza constantemente con nuevos contenidos y mejoras. </p>
9
- <h3>¿Por qué es tan popular y divertido? </h3>
10
- <p>Stumble Guys es uno de los juegos más populares y divertidos de Google Play Store, con más de 10 millones de descargas y una calificación de 4.4 estrellas. El juego es amado por muchos jugadores por su juego simple pero adictivo, gráficos coloridos y física hilarante. El juego es fácil de recoger y jugar, pero difícil de dominar. Nunca se sabe lo que sucederá a continuación, ya que cada nivel está lleno de sorpresas y aleatoriedad. Te reirás, gritarás, rabiarás y celebrarás mientras tropiezas hacia la victoria o la derrota. </p>
11
- <h2>¿Cómo descargar e instalar Stumble Guys APK OBB en su dispositivo Android? </h2>
12
- <h3> ¿Qué son los archivos APK y OBB y por qué los necesita? </h3>
13
-
14
- <p>Stumble Guys es uno de esos juegos que necesitan tanto archivos APK como OBB para funcionar. El archivo APK contiene la información básica y el código del juego, mientras que el archivo OBB contiene los datos adicionales como gráficos, sonidos, mapas, etc. Necesitas ambos archivos para disfrutar de todas las características del juego. </p>
15
- <h3>¿Dónde encontrar la versión más reciente y oficial de Stumble Guys APK OBB? </h3>
16
- <p>La forma más fácil de encontrar la versión más reciente y oficial de Stumble Guys APK OBB es descargarlo desde la Google Play Store. Sin embargo, algunos dispositivos pueden no ser compatibles con el juego o pueden tener espacio de almacenamiento limitado. En ese caso, puede descargar los archivos de una fuente de terceros de confianza como [StumbleGuys.net]( 1 ), que es el único sitio donde se puede descargar Stumble Guys APK OBB gratis. </p <h3>Cómo instalar Stumble Guys APK OBB paso a paso? </h3>
17
- <p>Instalar Stumble Guys APK OBB no es difícil, pero es necesario seguir algunos pasos con cuidado. Aquí hay una guía sobre cómo hacerlo:</p>
18
- <h4>Paso 1: Habilitar fuentes desconocidas en su dispositivo</h4>
19
- <p>Antes de que pueda instalar cualquier archivo APK en su dispositivo, debe habilitar la opción para permitir fuentes desconocidas. Esto le permitirá instalar aplicaciones de fuentes distintas de Google Play Store. Para hacer esto, vaya a la configuración del dispositivo, luego la seguridad, luego cambie la opción de fuentes desconocidas. Puede ver un mensaje de advertencia, pero no se preocupe, es seguro siempre y cuando descargue los archivos de una fuente confiable. </p>
20
- <h4>Paso 2: Descargar Stumble Guys APK OBB de una fuente de confianza</h4>
21
- <p>Siguiente, es necesario descargar los archivos OBB Stumble Guys APK de una fuente de confianza. Como se mencionó anteriormente, el único sitio donde se puede descargar Stumble Guys APK OBB de forma gratuita es [StumbleGuys.net]. Ve al sitio y haz clic en el botón de descarga. Verás dos archivos: uno con la extensión APK y otro con la extensión ZIP. Descarga ambos archivos y guárdalos en una carpeta en tu dispositivo. </p>
22
- <h4>Paso 3: Localizar y extraer el archivo OBB</h4>
23
-
24
- <h4>Paso 4: Instalar el archivo APK y lanzar el juego</h4>
25
- <p>El paso final es instalar el archivo APK y lanzar el juego. Para hacer esto, vuelva a la aplicación de administrador de archivos y toque en el archivo APK. Verá un mensaje pidiéndole que instale la aplicación. Toque en instalar y espere a que termine. Una vez hecho, puede iniciar el juego tocando en su icono en la pantalla de inicio o cajón de aplicaciones. Verás una pantalla de bienvenida y luego el menú principal del juego. </p>
26
- <h2>¿Cómo se juega Stumble chicos en su dispositivo Android? </h2>
27
- <h3>¿Cómo crear una cuenta y personalizar tu personaje? </h3>
28
- <p>Para jugar Stumble Guys online, necesitas crear una cuenta y personalizar tu personaje. Para hacer esto, toque en el botón de reproducción en el menú principal y luego toque en el icono de perfil en la esquina superior izquierda. Verás una pantalla donde puedes introducir tu nombre de usuario, elegir tu región y personalizar tu personaje. Puedes cambiar el color de piel, el estilo de cabello, el atuendo y los accesorios de tu personaje tocando los iconos de abajo. También puedes desbloquear nuevos objetos jugando más partidos o comprándolos con monedas. </p>
29
- <p></p>
30
- <h3>¿Cómo unirse a un partido y competir con otros jugadores? </h3>
31
- <p>Para unirse a un partido y competir con otros jugadores, toque en el botón de reproducción en el menú principal y luego elija uno de los modos: solo o personalizado. En el modo individual, te unirás a un partido aleatorio con hasta 31 jugadores en línea. En el modo personalizado, puede crear o unirse a una partida privada con sus amigos u otros jugadores utilizando un código. Una vez que se une a un partido, verá un temporizador de cuenta atrás y luego el juego comenzará. </p>
32
- <h3> ¿Cómo usar potenciadores y evitar obstáculos? </h3>
33
-
34
- <p>Durante cada nivel, verás varios power-ups que pueden darte una ventaja o desventaja. Algunos de ellos son el aumento de velocidad, el rayo de contracción, el rayo de congelación, cáscara de plátano, etc. Para usarlos, solo pásalos y se activarán automáticamente. Sin embargo, ten cuidado, ya que algunos de ellos también pueden afectarte a ti o a otros jugadores cercanos. </p>
35
- <p>El objetivo de cada nivel es llegar a la línea de meta antes de que otros jugadores o antes de que acabe el tiempo. Solo un cierto número de jugadores puede calificar para cada nivel, así que sé rápido e inteligente. Si no calificas o te caes del mapa, serás eliminado del partido. </p>
36
- <h2>Conclusión</h <h2>Conclusión</h2>
37
- <p>Stumble Guys es un divertido y adictivo juego de fiesta que puedes jugar en tu dispositivo Android con tus amigos u otros jugadores en línea. Es un juego de habilidad, suerte y risa, donde tienes que superar varios obstáculos y trampas para llegar a la meta. Para jugar el juego, es necesario descargar e instalar los archivos OBB Stumble Guys APK de una fuente de confianza. Luego, puedes crear una cuenta, personalizar tu personaje y unirte a una partida. También puedes usar potenciadores y evitar obstáculos para ganar ventaja sobre tus oponentes. Stumble Guys es un juego que te mantendrá entretenido durante horas y te hará sonreír. </p>
38
- <p>Si usted está buscando un nuevo y emocionante juego para jugar en su dispositivo Android, entonces usted debe probar definitivamente Stumble Guys. Es gratis para descargar y jugar, y se actualiza constantemente con nuevos contenidos y mejoras. ¡Descarga Stumble Guys APK OBB hoy y únete al último juego de knockout de fiesta! </p>
39
- <h3>Preguntas frecuentes</h3>
40
- <p>Aquí hay algunas preguntas frecuentes sobre Stumble Guys APK OBB descargar:</p>
41
- <tabla>
42
- <tr>
43
- <th>Pregunta</th>
44
- <th>Respuesta</th>
45
- </tr>
46
- <tr>
47
- <td>¿Es seguro descargar e instalar Stumble Guys APK OBB? </td>
48
-
49
- </tr>
50
- <tr>
51
- <td>¿Necesito una conexión a Internet para jugar a Stumble Guys? </td>
52
- <td>Sí, necesitas una conexión a Internet para jugar a Stumble Guys online con otros jugadores. Sin embargo, también puedes jugar sin conexión en el modo de práctica, donde puedes probar diferentes niveles y potenciadores. </td>
53
- </tr>
54
- <tr>
55
- <td>¿Cómo puedo jugar Stumble Guys con mis amigos? </td>
56
- <td>Puedes jugar a Stumble Guys con tus amigos creando o uniéndote a una partida personalizada. Para hacer esto, toque en el botón de reproducción en el menú principal y luego elegir el modo personalizado. Puede crear una nueva coincidencia o introducir un código para unirse a una existente. También puede invitar a sus amigos compartiendo el código con ellos. </td>
57
- </tr>
58
- <tr>
59
- <td>¿Cómo puedo obtener más monedas en Stumble Guys? </td>
60
- <td>Puedes obtener más monedas en Stumble Guys jugando más partidos, completando misiones diarias, viendo anuncios o comprándolos con dinero real. Puedes usar monedas para desbloquear nuevos atuendos y accesorios para tu personaje. </td>
61
- </tr>
62
- <tr>
63
- <td>¿Cómo puedo contactar a los desarrolladores de Stumble Guys? </td>
64
- <td>Puede ponerse en contacto con los desarrolladores de Stumble Guys enviándoles un correo electrónico a [email protected] o siguiéndolos en sus cuentas de redes sociales como Facebook, Twitter, Instagram y YouTube. También puede dejar un comentario o retroalimentación en Google Play Store.</td>
65
- </tr>
66
- </tabla></p> 64aa2da5cf<br />
67
- <br />
68
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Fuera De La Carretera Mod Apk Todos Los Coches Desbloqueados ltima Versin.md DELETED
@@ -1,55 +0,0 @@
1
-
2
- <h1>Descargar Off The Road Mod APK Todos los coches desbloqueados última versión</h1>
3
- <p>Si eres un fan de los juegos de conducción todoterreno, te encantará Off The Road, un juego realista e inmersivo que te permite conducir varios vehículos en terrenos desafiantes. En este artículo, le mostraremos cómo descargar e instalar Off The Road mod apk, que le da monedas ilimitadas y todos los coches desbloqueados. También compartiremos algunos consejos y trucos para ayudarte a dominar el juego y divertirte más. </p>
4
- <h2>descargar fuera de la carretera mod apk todos los coches desbloqueados última versión</h2><br /><p><b><b>Download</b> &gt; <a href="https://bltlly.com/2v6JHv">https://bltlly.com/2v6JHv</a></b></p><br /><br />
5
- <h2>¿Qué está fuera de la carretera? </h2>
6
- <p>Off The Road es un juego desarrollado por Dogbyte Games, los creadores de Zombie Offroad Safari y Blocky Roads. Está disponible para dispositivos Android e iOS, así como para Nintendo Switch. Off The Road es un juego que cuenta con 12 pistas y 18 vehículos de Ford y Land Rover. El juego tiene cinco modos de juego: carrera rápida, carrera, torneo, árcade y multijugador. El modo carrera es el modo principal del juego, en el que el jugador corre para desbloquear nuevos vehículos, pistas y tipos de carreras. El juego también presenta simulaciones realistas de vehículos y física. Otras características incluyen desafíos todoterreno, trabajos de transporte y la capacidad de jugar el juego con o sin internet. </p>
7
- <h3>Características de Off The Road</h3>
8
- <p>Algunas de las características que hacen de Off The Road un gran juego son:</p>
9
- <ul>
10
- <li><b>Gráficos realistas y efectos de sonido:</b> El juego tiene impresionantes gráficos en 3D que muestran los detalles de los vehículos, los entornos y los efectos meteorológicos. Los efectos de sonido también son realistas y coinciden con las acciones de los vehículos. </li>
11
- <li><b>Diversos vehículos y opciones de personalización:</b> El juego ofrece 18 vehículos diferentes para elegir, incluyendo todoterreno 4x4, camiones, barcos, helicópteros e incluso un tanque. Cada vehículo tiene sus propias características y se puede actualizar con varias piezas y accesorios. El jugador también puede personalizar la apariencia de los vehículos con diferentes colores, pegatinas y calcomanías. </li>
12
-
13
- <li><b>Misiones y tareas desafiantes:</b> El juego tiene varias misiones y tareas que el jugador puede completar para ganar monedas y XP. Estos incluyen carreras contra otros conductores, entrega de carga, rescatar personas, destruir objetivos, realizar acrobacias y más. Las misiones varían en dificultad y recompensa. </li>
14
- <li><b>Modo multijugador y tablas de clasificación en línea:</b> El juego tiene un modo multijugador que permite al jugador unirse o crear salas en línea con hasta siete jugadores más. El modo multijugador tiene diferentes modos, como roaming libre, captura la bandera, rey de la colina, combate a muerte por equipos, carreras y más. El jugador también puede competir con otros jugadores en tablas de clasificación en línea en función de su rendimiento en varias categorías. </li>
15
- </ul>
16
- <h3>¿Por qué descargar Off The Road mod apk? </h3>
17
-
18
- <p>Si desea descargar e instalar Off The Road mod apk, puede seguir estos sencillos pasos:</p>
19
- <h3>Paso 1: Habilitar fuentes desconocidas</h3>
20
- <p>Antes de que pueda instalar Off The Road mod apk, es necesario habilitar fuentes desconocidas en el dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play o App Store. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </p>
21
- <h3>Paso 2: Descargar el archivo apk mod</h3>
22
- <p>Siguiente, es necesario descargar el archivo apk mod de una fuente confiable. Puede utilizar este enlace para descargar la última versión de Off The Road mod apk, que es 1.6.2 a partir de junio de 2023. El tamaño del archivo es de unos 150 MB, así que asegúrate de tener suficiente espacio en tu dispositivo. </p>
23
- <p></p>
24
- <h3>Paso 3: Instalar el archivo apk mod</h3>
25
- <p>Una vez que haya descargado el archivo apk mod, necesita instalarlo en su dispositivo. Para hacer esto, busque el archivo en su administrador de archivos y toque en él. Puede ver una ventana emergente pidiendo permiso para instalar la aplicación. Toque en Instalar y espere a que se complete la instalación. </p>
26
- <h3>Paso 4: Iniciar el juego y disfrutar de</h3>
27
- <p>Después de la instalación, puede iniciar el juego desde el cajón de la aplicación o la pantalla de inicio. Verás que tienes monedas ilimitadas y todos los coches desbloqueados en el juego. Ahora puedes disfrutar jugando Off The Road con todas sus características y contenido gratis. </p>
28
- <h2>Consejos y trucos para jugar fuera de la carretera</h2>
29
- <p>Ahora que ha descargado e instalado Off The Road mod apk, es posible que desee saber algunos consejos y trucos para ayudarle a jugar mejor el juego y divertirse más. Estos son algunos de ellos:</p>
30
- <h3>Utilice el mapa y la brújula para navegar</h3>
31
-
32
- <h3>Actualiza tus vehículos y desbloquea nuevos</h3>
33
- <p>Off The Road tiene 18 vehículos diferentes que puedes conducir en el juego. Cada vehículo tiene sus propias fortalezas y debilidades, tales como velocidad, manejo, durabilidad, capacidad de combustible, etc. Puede actualizar sus vehículos con varias piezas y accesorios, como neumáticos, motores, suspensiones, frenos, etc. Actualizar sus vehículos mejorará su rendimiento y los hará más adecuados para diferentes terrenos y desafíos. También puedes desbloquear vehículos nuevos completando misiones o comprándolos con monedas. Algunos vehículos son exclusivos para la versión premium del juego, pero se puede acceder a ellos con Off The Road mod apk. Puedes cambiar entre diferentes vehículos yendo a tu garaje o encontrándolos en el mundo. </p>
34
- <h3>Completa desafíos y misiones para ganar monedas y XP</h3>
35
- <p>Off The Road tiene varios desafíos y misiones que puedes completar para ganar monedas y XP. Las monedas se utilizan para comprar o actualizar vehículos, mientras que XP se utilizan para subir de nivel y desbloquear nuevas características. Los desafíos son tareas cortas que ponen a prueba tus habilidades en la conducción, como la deriva, saltar, aplastar, etc. Las misiones son tareas más largas que implican carreras, entrega, rescate, destrucción, etc. Puedes encontrar desafíos y misiones en el mapa o en la brújula. También puedes aceptarlos de los NPCs que conoces en el mundo. Completar desafíos y misiones te recompensará con monedas y XP según tu rendimiento. </p>
36
- <h3>Explora el mundo abierto y descubre secretos ocultos</h3>
37
-
38
- <h3>Utilice el helicóptero para volar sobre los obstáculos y llegar a nuevas áreas</h3>
39
- <p>Off The Road tiene un helicóptero que puede utilizar para volar sobre el mundo. El helicóptero es uno de los vehículos más versátiles del juego, ya que puede ir a cualquier parte y hacer cualquier cosa. Puede utilizar el helicóptero para volar sobre los obstáculos y llegar a nuevas áreas que de otra manera son inaccesibles por tierra o agua. También puede utilizar el helicóptero para realizar acrobacias, tales como rollos de barril, bucles, inmersiones, etc. El helicóptero tiene un cabrestante que se puede utilizar para conectar o separar objetos o vehículos. Puede utilizar el cabrestante para levantar o soltar objetos o vehículos en diferentes lugares. También puede utilizar el cabrestante para remolcar o rescatar otros vehículos que están atascados o dañados. </p>
40
- <h2>Conclusión</h2>
41
- <p>Off The Road es un juego que ofrece una experiencia de conducción todoterreno realista e inmersiva. Tiene 18 vehículos diferentes que se pueden conducir en 12 pistas con diversos terrenos y efectos climáticos. Tiene cinco modos de juego que puedes jugar online o offline. Tiene gráficos realistas y efectos de sonido que hacen que el juego sea más agradable. Si desea descargar e instalar Off The Road mod apk, puede seguir los pasos que hemos proporcionado en este artículo. Off The Road mod apk le da monedas ilimitadas y todos los coches desbloqueados de forma gratuita. De esta manera, puedes acceder a todas las características y contenidos del juego sin gastar dinero ni esperar largas horas. Esperamos que haya encontrado este artículo útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer y jugar feliz! </p>
42
- <h2>Preguntas frecuentes</h2>
43
- <p>Aquí hay algunas preguntas frecuentes acerca de Off The Road mod apk:</p>
44
- <h4>Q: Está fuera de la carretera mod apk seguro para descargar e instalar? </h4>
45
-
46
- <h4>Q: ¿Cuáles son los beneficios de Off The Road mod apk? </h4>
47
- <p>A: Off The Road mod apk le da monedas ilimitadas y todos los coches desbloqueados de forma gratuita. Esto significa que puedes disfrutar de todas las características y contenidos del juego sin gastar dinero ni esperar largas horas. También puede acceder a los vehículos premium que no están disponibles en la versión gratuita. </p>
48
- <h4>Q: ¿Cómo puedo actualizar Off The Road mod apk? </h4>
49
- <p>A: Para actualizar Off The Road mod apk, es necesario descargar e instalar la última versión del archivo apk mod de una fuente confiable. Puede utilizar este enlace para descargar la última versión de Off The Road mod apk, que es 1.6.2 a partir de junio de 2023. No es necesario desinstalar la versión anterior del apk mod antes de instalar el nuevo. </p>
50
- <h4>Q: ¿Cómo puedo desinstalar Off The Road mod apk? </h4>
51
- <p>A: Para desinstalar Off The Road mod apk, es necesario ir a Configuración > Aplicaciones > Off The Road > Desinstalar y toque en OK. También puedes desinstalar el juego presionando su icono en la pantalla de inicio o en el cajón de la aplicación y arrastrándolo a la opción Desinstalar. </p>
52
- <h4>Q: ¿Puedo jugar Off The Road mod apk con mis amigos? </h4>
53
- <p>A: Sí, puedes jugar Off The Road mod apk con tus amigos en línea o fuera de línea. Puedes unirte o crear salas en línea con hasta siete jugadores más en el modo multijugador. También puedes jugar con tus amigos en Wi-Fi local o Bluetooth en el modo árcade. </p> 64aa2da5cf<br />
54
- <br />
55
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/__init__.py DELETED
@@ -1,127 +0,0 @@
1
- import contextlib
2
- import functools
3
- import os
4
- import sys
5
- from typing import TYPE_CHECKING, List, Optional, Type, cast
6
-
7
- from pip._internal.utils.misc import strtobool
8
-
9
- from .base import BaseDistribution, BaseEnvironment, FilesystemWheel, MemoryWheel, Wheel
10
-
11
- if TYPE_CHECKING:
12
- from typing import Protocol
13
- else:
14
- Protocol = object
15
-
16
- __all__ = [
17
- "BaseDistribution",
18
- "BaseEnvironment",
19
- "FilesystemWheel",
20
- "MemoryWheel",
21
- "Wheel",
22
- "get_default_environment",
23
- "get_environment",
24
- "get_wheel_distribution",
25
- "select_backend",
26
- ]
27
-
28
-
29
- def _should_use_importlib_metadata() -> bool:
30
- """Whether to use the ``importlib.metadata`` or ``pkg_resources`` backend.
31
-
32
- By default, pip uses ``importlib.metadata`` on Python 3.11+, and
33
- ``pkg_resourcess`` otherwise. This can be overridden by a couple of ways:
34
-
35
- * If environment variable ``_PIP_USE_IMPORTLIB_METADATA`` is set, it
36
- dictates whether ``importlib.metadata`` is used, regardless of Python
37
- version.
38
- * On Python 3.11+, Python distributors can patch ``importlib.metadata``
39
- to add a global constant ``_PIP_USE_IMPORTLIB_METADATA = False``. This
40
- makes pip use ``pkg_resources`` (unless the user set the aforementioned
41
- environment variable to *True*).
42
- """
43
- with contextlib.suppress(KeyError, ValueError):
44
- return bool(strtobool(os.environ["_PIP_USE_IMPORTLIB_METADATA"]))
45
- if sys.version_info < (3, 11):
46
- return False
47
- import importlib.metadata
48
-
49
- return bool(getattr(importlib.metadata, "_PIP_USE_IMPORTLIB_METADATA", True))
50
-
51
-
52
- class Backend(Protocol):
53
- Distribution: Type[BaseDistribution]
54
- Environment: Type[BaseEnvironment]
55
-
56
-
57
- @functools.lru_cache(maxsize=None)
58
- def select_backend() -> Backend:
59
- if _should_use_importlib_metadata():
60
- from . import importlib
61
-
62
- return cast(Backend, importlib)
63
- from . import pkg_resources
64
-
65
- return cast(Backend, pkg_resources)
66
-
67
-
68
- def get_default_environment() -> BaseEnvironment:
69
- """Get the default representation for the current environment.
70
-
71
- This returns an Environment instance from the chosen backend. The default
72
- Environment instance should be built from ``sys.path`` and may use caching
73
- to share instance state accorss calls.
74
- """
75
- return select_backend().Environment.default()
76
-
77
-
78
- def get_environment(paths: Optional[List[str]]) -> BaseEnvironment:
79
- """Get a representation of the environment specified by ``paths``.
80
-
81
- This returns an Environment instance from the chosen backend based on the
82
- given import paths. The backend must build a fresh instance representing
83
- the state of installed distributions when this function is called.
84
- """
85
- return select_backend().Environment.from_paths(paths)
86
-
87
-
88
- def get_directory_distribution(directory: str) -> BaseDistribution:
89
- """Get the distribution metadata representation in the specified directory.
90
-
91
- This returns a Distribution instance from the chosen backend based on
92
- the given on-disk ``.dist-info`` directory.
93
- """
94
- return select_backend().Distribution.from_directory(directory)
95
-
96
-
97
- def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution:
98
- """Get the representation of the specified wheel's distribution metadata.
99
-
100
- This returns a Distribution instance from the chosen backend based on
101
- the given wheel's ``.dist-info`` directory.
102
-
103
- :param canonical_name: Normalized project name of the given wheel.
104
- """
105
- return select_backend().Distribution.from_wheel(wheel, canonical_name)
106
-
107
-
108
- def get_metadata_distribution(
109
- metadata_contents: bytes,
110
- filename: str,
111
- canonical_name: str,
112
- ) -> BaseDistribution:
113
- """Get the dist representation of the specified METADATA file contents.
114
-
115
- This returns a Distribution instance from the chosen backend sourced from the data
116
- in `metadata_contents`.
117
-
118
- :param metadata_contents: Contents of a METADATA file within a dist, or one served
119
- via PEP 658.
120
- :param filename: Filename for the dist this metadata represents.
121
- :param canonical_name: Normalized project name of the given dist.
122
- """
123
- return select_backend().Distribution.from_metadata_file_contents(
124
- metadata_contents,
125
- filename,
126
- canonical_name,
127
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/network/__init__.py DELETED
@@ -1,2 +0,0 @@
1
- """Contains purely network-related utilities.
2
- """
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatter.py DELETED
@@ -1,94 +0,0 @@
1
- """
2
- pygments.formatter
3
- ~~~~~~~~~~~~~~~~~~
4
-
5
- Base formatter class.
6
-
7
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
8
- :license: BSD, see LICENSE for details.
9
- """
10
-
11
- import codecs
12
-
13
- from pip._vendor.pygments.util import get_bool_opt
14
- from pip._vendor.pygments.styles import get_style_by_name
15
-
16
- __all__ = ['Formatter']
17
-
18
-
19
- def _lookup_style(style):
20
- if isinstance(style, str):
21
- return get_style_by_name(style)
22
- return style
23
-
24
-
25
- class Formatter:
26
- """
27
- Converts a token stream to text.
28
-
29
- Options accepted:
30
-
31
- ``style``
32
- The style to use, can be a string or a Style subclass
33
- (default: "default"). Not used by e.g. the
34
- TerminalFormatter.
35
- ``full``
36
- Tells the formatter to output a "full" document, i.e.
37
- a complete self-contained document. This doesn't have
38
- any effect for some formatters (default: false).
39
- ``title``
40
- If ``full`` is true, the title that should be used to
41
- caption the document (default: '').
42
- ``encoding``
43
- If given, must be an encoding name. This will be used to
44
- convert the Unicode token strings to byte strings in the
45
- output. If it is "" or None, Unicode strings will be written
46
- to the output file, which most file-like objects do not
47
- support (default: None).
48
- ``outencoding``
49
- Overrides ``encoding`` if given.
50
- """
51
-
52
- #: Name of the formatter
53
- name = None
54
-
55
- #: Shortcuts for the formatter
56
- aliases = []
57
-
58
- #: fn match rules
59
- filenames = []
60
-
61
- #: If True, this formatter outputs Unicode strings when no encoding
62
- #: option is given.
63
- unicodeoutput = True
64
-
65
- def __init__(self, **options):
66
- self.style = _lookup_style(options.get('style', 'default'))
67
- self.full = get_bool_opt(options, 'full', False)
68
- self.title = options.get('title', '')
69
- self.encoding = options.get('encoding', None) or None
70
- if self.encoding in ('guess', 'chardet'):
71
- # can happen for e.g. pygmentize -O encoding=guess
72
- self.encoding = 'utf-8'
73
- self.encoding = options.get('outencoding') or self.encoding
74
- self.options = options
75
-
76
- def get_style_defs(self, arg=''):
77
- """
78
- Return the style definitions for the current style as a string.
79
-
80
- ``arg`` is an additional argument whose meaning depends on the
81
- formatter used. Note that ``arg`` can also be a list or tuple
82
- for some formatters like the html formatter.
83
- """
84
- return ''
85
-
86
- def format(self, tokensource, outfile):
87
- """
88
- Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
89
- tuples and write it into ``outfile``.
90
- """
91
- if self.encoding:
92
- # wrap the outfile in a StreamWriter
93
- outfile = codecs.lookup(self.encoding)[3](outfile)
94
- return self.format_unencoded(tokensource, outfile)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Branon/TurboKeys/Dockerfile DELETED
@@ -1,11 +0,0 @@
1
- FROM node:18-bullseye-slim
2
- RUN apt-get update && \
3
- apt-get install -y git
4
- RUN git clone https://gitlab.com/khanon/oai-proxy.git /app
5
- WORKDIR /app
6
- RUN npm install
7
- COPY Dockerfile greeting.md* .env* ./
8
- RUN npm run build
9
- EXPOSE 7860
10
- ENV NODE_ENV=production
11
- CMD [ "npm", "start" ]
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/configs/Detectron1-Comparisons/README.md DELETED
@@ -1,82 +0,0 @@
1
-
2
- Detectron2's default settings and a few implementation details are different from Detectron.
3
-
4
- The differences in implementation details are shared in
5
- [Compatibility with Other Libraries](../../docs/notes/compatibility.md).
6
-
7
- The differences in default config includes:
8
- * Use scale augmentation during training. This improves AP with lower training cost.
9
- * Use L1 loss instead of smooth L1 loss for simplicity. This sometimes improves box AP but may
10
- affect other AP.
11
- * Use `POOLER_SAMPLING_RATIO=0` instead of 2. This does not significantly affect AP.
12
- * Use `ROIAlignV2`. This does not significantly affect AP.
13
-
14
- In this directory, we provide a few configs that mimic Detectron's behavior as close as possible.
15
- This provides a fair comparison of accuracy and speed against Detectron.
16
-
17
- <!--
18
- ./gen_html_table.py --config 'Detectron1-Comparisons/*.yaml' --name "Faster R-CNN" "Keypoint R-CNN" "Mask R-CNN" --fields lr_sched train_speed inference_speed mem box_AP mask_AP keypoint_AP --base-dir ../../../configs/Detectron1-Comparisons
19
- -->
20
-
21
-
22
- <table><tbody>
23
- <!-- START TABLE -->
24
- <!-- TABLE HEADER -->
25
- <th valign="bottom">Name</th>
26
- <th valign="bottom">lr<br/>sched</th>
27
- <th valign="bottom">train<br/>time<br/>(s/iter)</th>
28
- <th valign="bottom">inference<br/>time<br/>(s/im)</th>
29
- <th valign="bottom">train<br/>mem<br/>(GB)</th>
30
- <th valign="bottom">box<br/>AP</th>
31
- <th valign="bottom">mask<br/>AP</th>
32
- <th valign="bottom">kp.<br/>AP</th>
33
- <th valign="bottom">model id</th>
34
- <th valign="bottom">download</th>
35
- <!-- TABLE BODY -->
36
- <!-- ROW: faster_rcnn_R_50_FPN_noaug_1x -->
37
- <tr><td align="left"><a href="faster_rcnn_R_50_FPN_noaug_1x.yaml">Faster R-CNN</a></td>
38
- <td align="center">1x</td>
39
- <td align="center">0.219</td>
40
- <td align="center">0.038</td>
41
- <td align="center">3.1</td>
42
- <td align="center">36.9</td>
43
- <td align="center"></td>
44
- <td align="center"></td>
45
- <td align="center">137781054</td>
46
- <td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x/137781054/model_final_7ab50c.pkl">model</a>&nbsp;|&nbsp;<a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x/137781054/metrics.json">metrics</a></td>
47
- </tr>
48
- <!-- ROW: keypoint_rcnn_R_50_FPN_1x -->
49
- <tr><td align="left"><a href="keypoint_rcnn_R_50_FPN_1x.yaml">Keypoint R-CNN</a></td>
50
- <td align="center">1x</td>
51
- <td align="center">0.313</td>
52
- <td align="center">0.071</td>
53
- <td align="center">5.0</td>
54
- <td align="center">53.1</td>
55
- <td align="center"></td>
56
- <td align="center">64.2</td>
57
- <td align="center">137781195</td>
58
- <td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x/137781195/model_final_cce136.pkl">model</a>&nbsp;|&nbsp;<a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x/137781195/metrics.json">metrics</a></td>
59
- </tr>
60
- <!-- ROW: mask_rcnn_R_50_FPN_noaug_1x -->
61
- <tr><td align="left"><a href="mask_rcnn_R_50_FPN_noaug_1x.yaml">Mask R-CNN</a></td>
62
- <td align="center">1x</td>
63
- <td align="center">0.273</td>
64
- <td align="center">0.043</td>
65
- <td align="center">3.4</td>
66
- <td align="center">37.8</td>
67
- <td align="center">34.9</td>
68
- <td align="center"></td>
69
- <td align="center">137781281</td>
70
- <td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x/137781281/model_final_62ca52.pkl">model</a>&nbsp;|&nbsp;<a href="https://dl.fbaipublicfiles.com/detectron2/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x/137781281/metrics.json">metrics</a></td>
71
- </tr>
72
- </tbody></table>
73
-
74
- ## Comparisons:
75
-
76
- * Faster R-CNN: Detectron's AP is 36.7, similar to ours.
77
- * Keypoint R-CNN: Detectron's AP is box 53.6, keypoint 64.2. Fixing a Detectron's
78
- [bug](https://github.com/facebookresearch/Detectron/issues/459) lead to a drop in box AP, and can be
79
- compensated back by some parameter tuning.
80
- * Mask R-CNN: Detectron's AP is box 37.7, mask 33.9. We're 1 AP better in mask AP, due to more correct implementation.
81
-
82
- For speed comparison, see [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/pybind11/tests/test_buffers.py DELETED
@@ -1,109 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- import io
3
- import struct
4
-
5
- import pytest
6
-
7
- import env # noqa: F401
8
-
9
- from pybind11_tests import buffers as m
10
- from pybind11_tests import ConstructorStats
11
-
12
- np = pytest.importorskip("numpy")
13
-
14
-
15
- def test_from_python():
16
- with pytest.raises(RuntimeError) as excinfo:
17
- m.Matrix(np.array([1, 2, 3])) # trying to assign a 1D array
18
- assert str(excinfo.value) == "Incompatible buffer format!"
19
-
20
- m3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
21
- m4 = m.Matrix(m3)
22
-
23
- for i in range(m4.rows()):
24
- for j in range(m4.cols()):
25
- assert m3[i, j] == m4[i, j]
26
-
27
- cstats = ConstructorStats.get(m.Matrix)
28
- assert cstats.alive() == 1
29
- del m3, m4
30
- assert cstats.alive() == 0
31
- assert cstats.values() == ["2x3 matrix"]
32
- assert cstats.copy_constructions == 0
33
- # assert cstats.move_constructions >= 0 # Don't invoke any
34
- assert cstats.copy_assignments == 0
35
- assert cstats.move_assignments == 0
36
-
37
-
38
- # https://foss.heptapod.net/pypy/pypy/-/issues/2444
39
- def test_to_python():
40
- mat = m.Matrix(5, 4)
41
- assert memoryview(mat).shape == (5, 4)
42
-
43
- assert mat[2, 3] == 0
44
- mat[2, 3] = 4.0
45
- mat[3, 2] = 7.0
46
- assert mat[2, 3] == 4
47
- assert mat[3, 2] == 7
48
- assert struct.unpack_from('f', mat, (3 * 4 + 2) * 4) == (7, )
49
- assert struct.unpack_from('f', mat, (2 * 4 + 3) * 4) == (4, )
50
-
51
- mat2 = np.array(mat, copy=False)
52
- assert mat2.shape == (5, 4)
53
- assert abs(mat2).sum() == 11
54
- assert mat2[2, 3] == 4 and mat2[3, 2] == 7
55
- mat2[2, 3] = 5
56
- assert mat2[2, 3] == 5
57
-
58
- cstats = ConstructorStats.get(m.Matrix)
59
- assert cstats.alive() == 1
60
- del mat
61
- pytest.gc_collect()
62
- assert cstats.alive() == 1
63
- del mat2 # holds a mat reference
64
- pytest.gc_collect()
65
- assert cstats.alive() == 0
66
- assert cstats.values() == ["5x4 matrix"]
67
- assert cstats.copy_constructions == 0
68
- # assert cstats.move_constructions >= 0 # Don't invoke any
69
- assert cstats.copy_assignments == 0
70
- assert cstats.move_assignments == 0
71
-
72
-
73
- def test_inherited_protocol():
74
- """SquareMatrix is derived from Matrix and inherits the buffer protocol"""
75
-
76
- matrix = m.SquareMatrix(5)
77
- assert memoryview(matrix).shape == (5, 5)
78
- assert np.asarray(matrix).shape == (5, 5)
79
-
80
-
81
- def test_pointer_to_member_fn():
82
- for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]:
83
- buf = cls()
84
- buf.value = 0x12345678
85
- value = struct.unpack('i', bytearray(buf))[0]
86
- assert value == 0x12345678
87
-
88
-
89
- def test_readonly_buffer():
90
- buf = m.BufferReadOnly(0x64)
91
- view = memoryview(buf)
92
- assert view[0] == b'd' if env.PY2 else 0x64
93
- assert view.readonly
94
-
95
-
96
- def test_selective_readonly_buffer():
97
- buf = m.BufferReadOnlySelect()
98
-
99
- memoryview(buf)[0] = b'd' if env.PY2 else 0x64
100
- assert buf.value == 0x64
101
-
102
- io.BytesIO(b'A').readinto(buf)
103
- assert buf.value == ord(b'A')
104
-
105
- buf.readonly = True
106
- with pytest.raises(TypeError):
107
- memoryview(buf)[0] = b'\0' if env.PY2 else 0
108
- with pytest.raises(TypeError):
109
- io.BytesIO(b'1').readinto(buf)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/replace.h DELETED
@@ -1,823 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file replace.h
19
- * \brief Functions for replacing elements in a range with a particular value
20
- */
21
-
22
- #pragma once
23
-
24
- #include <thrust/detail/config.h>
25
- #include <thrust/detail/execution_policy.h>
26
-
27
- namespace thrust
28
- {
29
-
30
-
31
- /*! \addtogroup transformations
32
- * \addtogroup replacing
33
- * \ingroup transformations
34
- * \{
35
- */
36
-
37
-
38
- /*! \p replace replaces every element in the range [first, last) equal to \p old_value
39
- * with \p new_value. That is: for every iterator \c i, if <tt>*i == old_value</tt>
40
- * then it performs the <tt>assignment *i = new_value</tt>.
41
- *
42
- * The algorithm's execution is parallelized as determined by \p exec.
43
- *
44
- * \param exec The execution policy to use for parallelization.
45
- * \param first The beginning of the sequence of interest.
46
- * \param last The end of the sequence of interest.
47
- * \param old_value The value to replace.
48
- * \param new_value The new value to replace \p old_value.
49
- *
50
- * \tparam DerivedPolicy The name of the derived execution policy.
51
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
52
- * and \p ForwardIterator is mutable.
53
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html>Assignable.html">Assignable</a>,
54
- * \p T is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">EqualityComparable</a>,
55
- * objects of \p T may be compared for equality with objects of
56
- * \p ForwardIterator's \c value_type,
57
- * and \p T is convertible to \p ForwardIterator's \c value_type.
58
- *
59
- * The following code snippet demonstrates how to use \p replace to replace
60
- * a value of interest in a \c device_vector with another using the \p thrust::device
61
- * execution policy for parallelization:
62
- *
63
- * \code
64
- * #include <thrust/replace.h>
65
- * #include <thrust/device_vector.h>
66
- * #include <thrust/execution_policy.h>
67
- *
68
- * ...
69
- *
70
- * thrust::device_vector<int> A(4);
71
- * A[0] = 1;
72
- * A[1] = 2;
73
- * A[2] = 3;
74
- * A[3] = 1;
75
- *
76
- * thrust::replace(thrust::device, A.begin(), A.end(), 1, 99);
77
- *
78
- * // A contains [99, 2, 3, 99]
79
- * \endcode
80
- *
81
- * \see http://www.sgi.com/tech/stl/replace.html
82
- * \see \c replace_if
83
- * \see \c replace_copy
84
- * \see \c replace_copy_if
85
- */
86
- template<typename DerivedPolicy, typename ForwardIterator, typename T>
87
- __host__ __device__
88
- void replace(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
89
- ForwardIterator first, ForwardIterator last,
90
- const T &old_value,
91
- const T &new_value);
92
-
93
-
94
- /*! \p replace replaces every element in the range [first, last) equal to \p old_value
95
- * with \p new_value. That is: for every iterator \c i, if <tt>*i == old_value</tt>
96
- * then it performs the <tt>assignment *i = new_value</tt>.
97
- *
98
- * \param first The beginning of the sequence of interest.
99
- * \param last The end of the sequence of interest.
100
- * \param old_value The value to replace.
101
- * \param new_value The new value to replace \p old_value.
102
- *
103
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
104
- * and \p ForwardIterator is mutable.
105
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html>Assignable.html">Assignable</a>,
106
- * \p T is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">EqualityComparable</a>,
107
- * objects of \p T may be compared for equality with objects of
108
- * \p ForwardIterator's \c value_type,
109
- * and \p T is convertible to \p ForwardIterator's \c value_type.
110
- *
111
- * The following code snippet demonstrates how to use \p replace to replace
112
- * a value of interest in a \c device_vector with another.
113
- *
114
- * \code
115
- * #include <thrust/replace.h>
116
- * #include <thrust/device_vector.h>
117
- *
118
- * ...
119
- *
120
- * thrust::device_vector<int> A(4);
121
- * A[0] = 1;
122
- * A[1] = 2;
123
- * A[2] = 3;
124
- * A[3] = 1;
125
- *
126
- * thrust::replace(A.begin(), A.end(), 1, 99);
127
- *
128
- * // A contains [99, 2, 3, 99]
129
- * \endcode
130
- *
131
- * \see http://www.sgi.com/tech/stl/replace.html
132
- * \see \c replace_if
133
- * \see \c replace_copy
134
- * \see \c replace_copy_if
135
- */
136
- template<typename ForwardIterator, typename T>
137
- void replace(ForwardIterator first, ForwardIterator last, const T &old_value,
138
- const T &new_value);
139
-
140
-
141
- /*! \p replace_if replaces every element in the range <tt>[first, last)</tt> for which
142
- * \p pred returns \c true with \p new_value. That is: for every iterator \c i, if
143
- * <tt>pred(*i)</tt> is \c true then it performs the assignment <tt>*i = new_value</tt>.
144
- *
145
- * The algorithm's execution is parallelized as determined by \p exec.
146
- *
147
- * \param exec The execution policy to use for parallelization.
148
- * \param first The beginning of the sequence of interest.
149
- * \param last The end of the sequence of interest.
150
- * \param pred The predicate to test on every value of the range <tt>[first,last)</tt>.
151
- * \param new_value The new value to replace elements which <tt>pred(*i)</tt> evaluates
152
- * to \c true.
153
- *
154
- * \tparam DerivedPolicy The name of the derived execution policy.
155
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
156
- * \p ForwardIterator is mutable,
157
- * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type.
158
- * \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
159
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
160
- * and \p T is convertible to \p ForwardIterator's \c value_type.
161
- *
162
- * The following code snippet demonstrates how to use \p replace_if to replace
163
- * a \c device_vector's negative elements with \c 0 using the \p thrust::device execution policy
164
- * for parallelization:
165
- *
166
- * \code
167
- * #include <thrust/replace.h>
168
- * #include <thrust/device_vector.h>
169
- * #include <thrust/execution_policy.h>
170
- * ...
171
- * struct is_less_than_zero
172
- * {
173
- * __host__ __device__
174
- * bool operator()(int x)
175
- * {
176
- * return x < 0;
177
- * }
178
- * };
179
- *
180
- * ...
181
- *
182
- * thrust::device_vector<int> A(4);
183
- * A[0] = 1;
184
- * A[1] = -3;
185
- * A[2] = 2;
186
- * A[3] = -1;
187
- *
188
- * is_less_than_zero pred;
189
- *
190
- * thrust::replace_if(thrust::device, A.begin(), A.end(), pred, 0);
191
- *
192
- * // A contains [1, 0, 2, 0]
193
- * \endcode
194
- *
195
- * \see http://www.sgi.com/tech/stl/replace_if.html
196
- * \see \c replace
197
- * \see \c replace_copy
198
- * \see \c replace_copy_if
199
- */
200
- template<typename DerivedPolicy, typename ForwardIterator, typename Predicate, typename T>
201
- __host__ __device__
202
- void replace_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
203
- ForwardIterator first, ForwardIterator last,
204
- Predicate pred,
205
- const T &new_value);
206
-
207
-
208
- /*! \p replace_if replaces every element in the range <tt>[first, last)</tt> for which
209
- * \p pred returns \c true with \p new_value. That is: for every iterator \c i, if
210
- * <tt>pred(*i)</tt> is \c true then it performs the assignment <tt>*i = new_value</tt>.
211
- *
212
- * \param first The beginning of the sequence of interest.
213
- * \param last The end of the sequence of interest.
214
- * \param pred The predicate to test on every value of the range <tt>[first,last)</tt>.
215
- * \param new_value The new value to replace elements which <tt>pred(*i)</tt> evaluates
216
- * to \c true.
217
- *
218
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
219
- * \p ForwardIterator is mutable,
220
- * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type.
221
- * \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
222
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
223
- * and \p T is convertible to \p ForwardIterator's \c value_type.
224
- *
225
- * The following code snippet demonstrates how to use \p replace_if to replace
226
- * a \c device_vector's negative elements with \c 0.
227
- *
228
- * \code
229
- * #include <thrust/replace.h>
230
- * #include <thrust/device_vector.h>
231
- * ...
232
- * struct is_less_than_zero
233
- * {
234
- * __host__ __device__
235
- * bool operator()(int x)
236
- * {
237
- * return x < 0;
238
- * }
239
- * };
240
- *
241
- * ...
242
- *
243
- * thrust::device_vector<int> A(4);
244
- * A[0] = 1;
245
- * A[1] = -3;
246
- * A[2] = 2;
247
- * A[3] = -1;
248
- *
249
- * is_less_than_zero pred;
250
- *
251
- * thrust::replace_if(A.begin(), A.end(), pred, 0);
252
- *
253
- * // A contains [1, 0, 2, 0]
254
- * \endcode
255
- *
256
- * \see http://www.sgi.com/tech/stl/replace_if.html
257
- * \see \c replace
258
- * \see \c replace_copy
259
- * \see \c replace_copy_if
260
- */
261
- template<typename ForwardIterator, typename Predicate, typename T>
262
- void replace_if(ForwardIterator first, ForwardIterator last,
263
- Predicate pred,
264
- const T &new_value);
265
-
266
-
267
- /*! \p replace_if replaces every element in the range <tt>[first, last)</tt> for which
268
- * <tt>pred(*s)</tt> returns \c true with \p new_value. That is: for every iterator
269
- * \c i in the range <tt>[first, last)</tt>, and \c s in the range <tt>[stencil, stencil + (last - first))</tt>,
270
- * if <tt>pred(*s)</tt> is \c true then it performs the assignment <tt>*i = new_value</tt>.
271
- *
272
- * The algorithm's execution is parallelized as determined by \p exec.
273
- *
274
- * \param exec The execution policy to use for parallelization.
275
- * \param first The beginning of the sequence of interest.
276
- * \param last The end of the sequence of interest.
277
- * \param stencil The beginning of the stencil sequence.
278
- * \param pred The predicate to test on every value of the range <tt>[first,last)</tt>.
279
- * \param new_value The new value to replace elements which <tt>pred(*i)</tt> evaluates
280
- * to \c true.
281
- *
282
- * \tparam DerivedPolicy The name of the derived execution policy.
283
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
284
- * and \p ForwardIterator is mutable.
285
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
286
- * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type.
287
- * \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
288
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
289
- * and \p T is convertible to \p ForwardIterator's \c value_type.
290
- *
291
- * The following code snippet demonstrates how to use \p replace_if to replace
292
- * a \c device_vector's element with \c 0 when its corresponding stencil element is less than zero
293
- * using the \p thrust::device execution policy for parallelization:
294
- *
295
- * \code
296
- * #include <thrust/replace.h>
297
- * #include <thrust/device_vector.h>
298
- * #include <thrust/execution_policy.h>
299
- *
300
- * struct is_less_than_zero
301
- * {
302
- * __host__ __device__
303
- * bool operator()(int x)
304
- * {
305
- * return x < 0;
306
- * }
307
- * };
308
- *
309
- * ...
310
- *
311
- * thrust::device_vector<int> A(4);
312
- * A[0] = 10;
313
- * A[1] = 20;
314
- * A[2] = 30;
315
- * A[3] = 40;
316
- *
317
- * thrust::device_vector<int> S(4);
318
- * S[0] = -1;
319
- * S[1] = 0;
320
- * S[2] = -1;
321
- * S[3] = 0;
322
- *
323
- * is_less_than_zero pred;
324
- * thrust::replace_if(thrust::device, A.begin(), A.end(), S.begin(), pred, 0);
325
- *
326
- * // A contains [0, 20, 0, 40]
327
- * \endcode
328
- *
329
- * \see http://www.sgi.com/tech/stl/replace_if.html
330
- * \see \c replace
331
- * \see \c replace_copy
332
- * \see \c replace_copy_if
333
- */
334
- template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename Predicate, typename T>
335
- __host__ __device__
336
- void replace_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
337
- ForwardIterator first, ForwardIterator last,
338
- InputIterator stencil,
339
- Predicate pred,
340
- const T &new_value);
341
-
342
-
343
- /*! \p replace_if replaces every element in the range <tt>[first, last)</tt> for which
344
- * <tt>pred(*s)</tt> returns \c true with \p new_value. That is: for every iterator
345
- * \c i in the range <tt>[first, last)</tt>, and \c s in the range <tt>[stencil, stencil + (last - first))</tt>,
346
- * if <tt>pred(*s)</tt> is \c true then it performs the assignment <tt>*i = new_value</tt>.
347
- *
348
- * \param first The beginning of the sequence of interest.
349
- * \param last The end of the sequence of interest.
350
- * \param stencil The beginning of the stencil sequence.
351
- * \param pred The predicate to test on every value of the range <tt>[first,last)</tt>.
352
- * \param new_value The new value to replace elements which <tt>pred(*i)</tt> evaluates
353
- * to \c true.
354
- *
355
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
356
- * and \p ForwardIterator is mutable.
357
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
358
- * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type.
359
- * \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
360
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
361
- * and \p T is convertible to \p ForwardIterator's \c value_type.
362
- *
363
- * The following code snippet demonstrates how to use \p replace_if to replace
364
- * a \c device_vector's element with \c 0 when its corresponding stencil element is less than zero.
365
- *
366
- * \code
367
- * #include <thrust/replace.h>
368
- * #include <thrust/device_vector.h>
369
- *
370
- * struct is_less_than_zero
371
- * {
372
- * __host__ __device__
373
- * bool operator()(int x)
374
- * {
375
- * return x < 0;
376
- * }
377
- * };
378
- *
379
- * ...
380
- *
381
- * thrust::device_vector<int> A(4);
382
- * A[0] = 10;
383
- * A[1] = 20;
384
- * A[2] = 30;
385
- * A[3] = 40;
386
- *
387
- * thrust::device_vector<int> S(4);
388
- * S[0] = -1;
389
- * S[1] = 0;
390
- * S[2] = -1;
391
- * S[3] = 0;
392
- *
393
- * is_less_than_zero pred;
394
- * thrust::replace_if(A.begin(), A.end(), S.begin(), pred, 0);
395
- *
396
- * // A contains [0, 20, 0, 40]
397
- * \endcode
398
- *
399
- * \see http://www.sgi.com/tech/stl/replace_if.html
400
- * \see \c replace
401
- * \see \c replace_copy
402
- * \see \c replace_copy_if
403
- */
404
- template<typename ForwardIterator, typename InputIterator, typename Predicate, typename T>
405
- void replace_if(ForwardIterator first, ForwardIterator last,
406
- InputIterator stencil,
407
- Predicate pred,
408
- const T &new_value);
409
-
410
-
411
- /*! \p replace_copy copies elements from the range <tt>[first, last)</tt> to the range
412
- * <tt>[result, result + (last-first))</tt>, except that any element equal to \p old_value
413
- * is not copied; \p new_value is copied instead.
414
- *
415
- * More precisely, for every integer \c n such that <tt>0 <= n < last-first</tt>, \p replace_copy
416
- * performs the assignment <tt>*(result+n) = new_value</tt> if <tt>*(first+n) == old_value</tt>,
417
- * and <tt>*(result+n) = *(first+n)</tt> otherwise.
418
- *
419
- * The algorithm's execution is parallelized as determined by \p exec.
420
- *
421
- * \param exec The execution policy to use for parallelization.
422
- * \param first The beginning of the sequence to copy from.
423
- * \param last The end of the sequence to copy from.
424
- * \param result The beginning of the sequence to copy to.
425
- * \param old_value The value to replace.
426
- * \param new_value The replacement value for which <tt>*i == old_value</tt> evaluates to \c true.
427
- * \return <tt>result + (last-first)</tt>
428
- *
429
- * \tparam DerivedPolicy The name of the derived execution policy.
430
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
431
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
432
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
433
- * \p T is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>,
434
- * \p T may be compared for equality with \p InputIterator's \c value_type,
435
- * and \p T is convertible to \p OutputIterator's \c value_type.
436
- *
437
- * \pre \p first may equal \p result, but the ranges <tt>[first, last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
438
- *
439
- * \code
440
- * #include <thrust/replace.h>
441
- * #include <thrust/device_vector.h>
442
- * #include <thrust/execution_policy.h>
443
- * ...
444
- * thrust::device_vector<int> A(4);
445
- * A[0] = 1;
446
- * A[1] = 2;
447
- * A[2] = 3;
448
- * A[3] = 1;
449
- *
450
- * thrust::device_vector<int> B(4);
451
- *
452
- * thrust::replace_copy(thrust::device, A.begin(), A.end(), B.begin(), 1, 99);
453
- *
454
- * // B contains [99, 2, 3, 99]
455
- * \endcode
456
- *
457
- * \see http://www.sgi.com/tech/stl/replace_copy.html
458
- * \see \c copy
459
- * \see \c replace
460
- * \see \c replace_if
461
- * \see \c replace_copy_if
462
- */
463
- template<typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename T>
464
- __host__ __device__
465
- OutputIterator replace_copy(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
466
- InputIterator first, InputIterator last,
467
- OutputIterator result,
468
- const T &old_value,
469
- const T &new_value);
470
-
471
-
472
- /*! \p replace_copy copies elements from the range <tt>[first, last)</tt> to the range
473
- * <tt>[result, result + (last-first))</tt>, except that any element equal to \p old_value
474
- * is not copied; \p new_value is copied instead.
475
- *
476
- * More precisely, for every integer \c n such that <tt>0 <= n < last-first</tt>, \p replace_copy
477
- * performs the assignment <tt>*(result+n) = new_value</tt> if <tt>*(first+n) == old_value</tt>,
478
- * and <tt>*(result+n) = *(first+n)</tt> otherwise.
479
- *
480
- * \param first The beginning of the sequence to copy from.
481
- * \param last The end of the sequence to copy from.
482
- * \param result The beginning of the sequence to copy to.
483
- * \param old_value The value to replace.
484
- * \param new_value The replacement value for which <tt>*i == old_value</tt> evaluates to \c true.
485
- * \return <tt>result + (last-first)</tt>
486
- *
487
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
488
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
489
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
490
- * \p T is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>,
491
- * \p T may be compared for equality with \p InputIterator's \c value_type,
492
- * and \p T is convertible to \p OutputIterator's \c value_type.
493
- *
494
- * \pre \p first may equal \p result, but the ranges <tt>[first, last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
495
- *
496
- * \code
497
- * #include <thrust/replace.h>
498
- * #include <thrust/device_vector.h>
499
- * ...
500
- * thrust::device_vector<int> A(4);
501
- * A[0] = 1;
502
- * A[1] = 2;
503
- * A[2] = 3;
504
- * A[3] = 1;
505
- *
506
- * thrust::device_vector<int> B(4);
507
- *
508
- * thrust::replace_copy(A.begin(), A.end(), B.begin(), 1, 99);
509
- *
510
- * // B contains [99, 2, 3, 99]
511
- * \endcode
512
- *
513
- * \see http://www.sgi.com/tech/stl/replace_copy.html
514
- * \see \c copy
515
- * \see \c replace
516
- * \see \c replace_if
517
- * \see \c replace_copy_if
518
- */
519
- template<typename InputIterator, typename OutputIterator, typename T>
520
- OutputIterator replace_copy(InputIterator first, InputIterator last,
521
- OutputIterator result, const T &old_value,
522
- const T &new_value);
523
-
524
-
525
- /*! \p replace_copy_if copies elements from the range <tt>[first, last)</tt> to the range
526
- * <tt>[result, result + (last-first))</tt>, except that any element for which \p pred
527
- * is \c true is not copied; \p new_value is copied instead.
528
- *
529
- * More precisely, for every integer \c n such that 0 <= n < last-first,
530
- * \p replace_copy_if performs the assignment <tt>*(result+n) = new_value</tt> if
531
- * <tt>pred(*(first+n))</tt>, and <tt>*(result+n) = *(first+n)</tt> otherwise.
532
- *
533
- * The algorithm's execution is parallelized as determined by \p exec.
534
- *
535
- * \param exec The execution policy to use for parallelization.
536
- * \param first The beginning of the sequence to copy from.
537
- * \param last The end of the sequence to copy from.
538
- * \param result The beginning of the sequence to copy to.
539
- * \param pred The predicate to test on every value of the range <tt>[first,last)</tt>.
540
- * \param new_value The replacement value to assign <tt>pred(*i)</tt> evaluates to \c true.
541
- * \return <tt>result + (last-first)</tt>
542
- *
543
- * \tparam DerivedPolicy The name of the derived execution policy.
544
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
545
- * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type.
546
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
547
- * \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
548
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
549
- * and \p T is convertible to \p OutputIterator's \c value_type.
550
- *
551
- * \pre \p first may equal \p result, but the ranges <tt>[first, last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
552
- *
553
- * \code
554
- * #include <thrust/replace.h>
555
- * #include <thrust/device_vector.h>
556
- * #include <thrust/execution_policy.h>
557
- *
558
- * struct is_less_than_zero
559
- * {
560
- * __host__ __device__
561
- * bool operator()(int x)
562
- * {
563
- * return x < 0;
564
- * }
565
- * };
566
- *
567
- * ...
568
- *
569
- * thrust::device_vector<int> A(4);
570
- * A[0] = 1;
571
- * A[1] = -3;
572
- * A[2] = 2;
573
- * A[3] = -1;
574
-
575
- * thrust::device_vector<int> B(4);
576
- * is_less_than_zero pred;
577
- *
578
- * thrust::replace_copy_if(thrust::device, A.begin(), A.end(), B.begin(), pred, 0);
579
- *
580
- * // B contains [1, 0, 2, 0]
581
- * \endcode
582
- *
583
- * \see http://www.sgi.com/tech/stl/replace_copy_if.html
584
- * \see \c replace
585
- * \see \c replace_if
586
- * \see \c replace_copy
587
- */
588
- template<typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename Predicate, typename T>
589
- __host__ __device__
590
- OutputIterator replace_copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
591
- InputIterator first, InputIterator last,
592
- OutputIterator result,
593
- Predicate pred,
594
- const T &new_value);
595
-
596
-
597
- /*! \p replace_copy_if copies elements from the range <tt>[first, last)</tt> to the range
598
- * <tt>[result, result + (last-first))</tt>, except that any element for which \p pred
599
- * is \c true is not copied; \p new_value is copied instead.
600
- *
601
- * More precisely, for every integer \c n such that 0 <= n < last-first,
602
- * \p replace_copy_if performs the assignment <tt>*(result+n) = new_value</tt> if
603
- * <tt>pred(*(first+n))</tt>, and <tt>*(result+n) = *(first+n)</tt> otherwise.
604
- *
605
- * \param first The beginning of the sequence to copy from.
606
- * \param last The end of the sequence to copy from.
607
- * \param result The beginning of the sequence to copy to.
608
- * \param pred The predicate to test on every value of the range <tt>[first,last)</tt>.
609
- * \param new_value The replacement value to assign <tt>pred(*i)</tt> evaluates to \c true.
610
- * \return <tt>result + (last-first)</tt>
611
- *
612
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
613
- * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type.
614
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
615
- * \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
616
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
617
- * and \p T is convertible to \p OutputIterator's \c value_type.
618
- *
619
- * \pre \p first may equal \p result, but the ranges <tt>[first, last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
620
- *
621
- * \code
622
- * #include <thrust/replace.h>
623
- * #include <thrust/device_vector.h>
624
- *
625
- * struct is_less_than_zero
626
- * {
627
- * __host__ __device__
628
- * bool operator()(int x)
629
- * {
630
- * return x < 0;
631
- * }
632
- * };
633
- *
634
- * ...
635
- *
636
- * thrust::device_vector<int> A(4);
637
- * A[0] = 1;
638
- * A[1] = -3;
639
- * A[2] = 2;
640
- * A[3] = -1;
641
-
642
- * thrust::device_vector<int> B(4);
643
- * is_less_than_zero pred;
644
- *
645
- * thrust::replace_copy_if(A.begin(), A.end(), B.begin(), pred, 0);
646
- *
647
- * // B contains [1, 0, 2, 0]
648
- * \endcode
649
- *
650
- * \see http://www.sgi.com/tech/stl/replace_copy_if.html
651
- * \see \c replace
652
- * \see \c replace_if
653
- * \see \c replace_copy
654
- */
655
- template<typename InputIterator, typename OutputIterator, typename Predicate, typename T>
656
- OutputIterator replace_copy_if(InputIterator first, InputIterator last,
657
- OutputIterator result,
658
- Predicate pred,
659
- const T &new_value);
660
-
661
-
662
- /*! This version of \p replace_copy_if copies elements from the range <tt>[first, last)</tt> to the range
663
- * <tt>[result, result + (last-first))</tt>, except that any element whose corresponding stencil
664
- * element causes \p pred to be \c true is not copied; \p new_value is copied instead.
665
- *
666
- * More precisely, for every integer \c n such that <tt>0 <= n < last-first</tt>,
667
- * \p replace_copy_if performs the assignment <tt>*(result+n) = new_value</tt> if
668
- * <tt>pred(*(stencil+n))</tt>, and <tt>*(result+n) = *(first+n)</tt> otherwise.
669
- *
670
- * The algorithm's execution is parallelized as determined by \p exec.
671
- *
672
- * \param exec The execution policy to use for parallelization.
673
- * \param first The beginning of the sequence to copy from.
674
- * \param last The end of the sequence to copy from.
675
- * \param stencil The beginning of the stencil sequence.
676
- * \param result The beginning of the sequence to copy to.
677
- * \param pred The predicate to test on every value of the range <tt>[stencil, stencil + (last - first))</tt>.
678
- * \param new_value The replacement value to assign when <tt>pred(*s)</tt> evaluates to \c true.
679
- * \return <tt>result + (last-first)</tt>
680
- *
681
- * \tparam DerivedPolicy The name of the derived execution policy.
682
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
683
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
684
- * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type.
685
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
686
- * \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
687
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
688
- * and \p T is convertible to \p OutputIterator's \c value_type.
689
- *
690
- * \pre \p first may equal \p result, but the ranges <tt>[first, last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
691
- * \pre \p stencil may equal \p result, but the ranges <tt>[stencil, stencil + (last - first))</tt> and <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
692
- *
693
- * \code
694
- * #include <thrust/replace.h>
695
- * #include <thrust/device_vector.h>
696
- * #include <thrust/execution_policy.h>
697
- *
698
- * struct is_less_than_zero
699
- * {
700
- * __host__ __device__
701
- * bool operator()(int x)
702
- * {
703
- * return x < 0;
704
- * }
705
- * };
706
- *
707
- * ...
708
- *
709
- * thrust::device_vector<int> A(4);
710
- * A[0] = 10;
711
- * A[1] = 20;
712
- * A[2] = 30;
713
- * A[3] = 40;
714
- *
715
- * thrust::device_vector<int> S(4);
716
- * S[0] = -1;
717
- * S[1] = 0;
718
- * S[2] = -1;
719
- * S[3] = 0;
720
- *
721
- * thrust::device_vector<int> B(4);
722
- * is_less_than_zero pred;
723
- *
724
- * thrust::replace_if(thrust::device, A.begin(), A.end(), S.begin(), B.begin(), pred, 0);
725
- *
726
- * // B contains [0, 20, 0, 40]
727
- * \endcode
728
- *
729
- * \see \c replace_copy
730
- * \see \c replace_if
731
- */
732
- template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Predicate, typename T>
733
- __host__ __device__
734
- OutputIterator replace_copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
735
- InputIterator1 first, InputIterator1 last,
736
- InputIterator2 stencil,
737
- OutputIterator result,
738
- Predicate pred,
739
- const T &new_value);
740
-
741
-
742
- /*! This version of \p replace_copy_if copies elements from the range <tt>[first, last)</tt> to the range
743
- * <tt>[result, result + (last-first))</tt>, except that any element whose corresponding stencil
744
- * element causes \p pred to be \c true is not copied; \p new_value is copied instead.
745
- *
746
- * More precisely, for every integer \c n such that <tt>0 <= n < last-first</tt>,
747
- * \p replace_copy_if performs the assignment <tt>*(result+n) = new_value</tt> if
748
- * <tt>pred(*(stencil+n))</tt>, and <tt>*(result+n) = *(first+n)</tt> otherwise.
749
- *
750
- * \param first The beginning of the sequence to copy from.
751
- * \param last The end of the sequence to copy from.
752
- * \param stencil The beginning of the stencil sequence.
753
- * \param result The beginning of the sequence to copy to.
754
- * \param pred The predicate to test on every value of the range <tt>[stencil, stencil + (last - first))</tt>.
755
- * \param new_value The replacement value to assign when <tt>pred(*s)</tt> evaluates to \c true.
756
- * \return <tt>result + (last-first)</tt>
757
- *
758
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
759
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
760
- * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type.
761
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
762
- * \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
763
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
764
- * and \p T is convertible to \p OutputIterator's \c value_type.
765
- *
766
- * \pre \p first may equal \p result, but the ranges <tt>[first, last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
767
- * \pre \p stencil may equal \p result, but the ranges <tt>[stencil, stencil + (last - first))</tt> and <tt>[result, result + (last - first))</tt> shall not overlap otherwise.
768
- *
769
- * \code
770
- * #include <thrust/replace.h>
771
- * #include <thrust/device_vector.h>
772
- *
773
- * struct is_less_than_zero
774
- * {
775
- * __host__ __device__
776
- * bool operator()(int x)
777
- * {
778
- * return x < 0;
779
- * }
780
- * };
781
- *
782
- * ...
783
- *
784
- * thrust::device_vector<int> A(4);
785
- * A[0] = 10;
786
- * A[1] = 20;
787
- * A[2] = 30;
788
- * A[3] = 40;
789
- *
790
- * thrust::device_vector<int> S(4);
791
- * S[0] = -1;
792
- * S[1] = 0;
793
- * S[2] = -1;
794
- * S[3] = 0;
795
- *
796
- * thrust::device_vector<int> B(4);
797
- * is_less_than_zero pred;
798
- *
799
- * thrust::replace_if(A.begin(), A.end(), S.begin(), B.begin(), pred, 0);
800
- *
801
- * // B contains [0, 20, 0, 40]
802
- * \endcode
803
- *
804
- * \see \c replace_copy
805
- * \see \c replace_if
806
- */
807
- template<typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Predicate, typename T>
808
- OutputIterator replace_copy_if(InputIterator1 first, InputIterator1 last,
809
- InputIterator2 stencil,
810
- OutputIterator result,
811
- Predicate pred,
812
- const T &new_value);
813
-
814
-
815
- /*! \} // end replacing
816
- * \} // transformations
817
- */
818
-
819
-
820
- } // end thrust
821
-
822
- #include <thrust/detail/replace.inl>
823
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/memory.h DELETED
@@ -1,99 +0,0 @@
1
- /*
2
- * Copyright 2008-2018 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- /*! \file thrust/system/tbb/memory.h
18
- * \brief Managing memory associated with Thrust's TBB system.
19
- */
20
-
21
- #pragma once
22
-
23
- #include <thrust/detail/config.h>
24
- #include <thrust/system/tbb/memory_resource.h>
25
- #include <thrust/memory.h>
26
- #include <thrust/detail/type_traits.h>
27
- #include <thrust/mr/allocator.h>
28
- #include <ostream>
29
-
30
- namespace thrust
31
- {
32
- namespace system
33
- {
34
- namespace tbb
35
- {
36
-
37
- /*! Allocates an area of memory available to Thrust's <tt>tbb</tt> system.
38
- * \param n Number of bytes to allocate.
39
- * \return A <tt>tbb::pointer<void></tt> pointing to the beginning of the newly
40
- * allocated memory. A null <tt>tbb::pointer<void></tt> is returned if
41
- * an error occurs.
42
- * \note The <tt>tbb::pointer<void></tt> returned by this function must be
43
- * deallocated with \p tbb::free.
44
- * \see tbb::free
45
- * \see std::malloc
46
- */
47
- inline pointer<void> malloc(std::size_t n);
48
-
49
- /*! Allocates a typed area of memory available to Thrust's <tt>tbb</tt> system.
50
- * \param n Number of elements to allocate.
51
- * \return A <tt>tbb::pointer<T></tt> pointing to the beginning of the newly
52
- * allocated memory. A null <tt>tbb::pointer<T></tt> is returned if
53
- * an error occurs.
54
- * \note The <tt>tbb::pointer<T></tt> returned by this function must be
55
- * deallocated with \p tbb::free.
56
- * \see tbb::free
57
- * \see std::malloc
58
- */
59
- template<typename T>
60
- inline pointer<T> malloc(std::size_t n);
61
-
62
- /*! Deallocates an area of memory previously allocated by <tt>tbb::malloc</tt>.
63
- * \param ptr A <tt>tbb::pointer<void></tt> pointing to the beginning of an area
64
- * of memory previously allocated with <tt>tbb::malloc</tt>.
65
- * \see tbb::malloc
66
- * \see std::free
67
- */
68
- inline void free(pointer<void> ptr);
69
-
70
- /*! \p tbb::allocator is the default allocator used by the \p tbb system's containers such as
71
- * <tt>tbb::vector</tt> if no user-specified allocator is provided. \p tbb::allocator allocates
72
- * (deallocates) storage with \p tbb::malloc (\p tbb::free).
73
- */
74
- template<typename T>
75
- using allocator = thrust::mr::stateless_resource_allocator<T, memory_resource>;
76
-
77
- } // end tbb
78
-
79
- /*! \}
80
- */
81
-
82
- } // end system
83
-
84
- /*! \namespace thrust::tbb
85
- * \brief \p thrust::tbb is a top-level alias for thrust::system::tbb.
86
- */
87
- namespace tbb
88
- {
89
-
90
- using thrust::system::tbb::malloc;
91
- using thrust::system::tbb::free;
92
- using thrust::system::tbb::allocator;
93
-
94
- } // end tbb
95
-
96
- } // end thrust
97
-
98
- #include <thrust/system/tbb/detail/memory.inl>
99
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/models/detectors/mask_scoring_rcnn.py DELETED
@@ -1,27 +0,0 @@
1
- from ..builder import DETECTORS
2
- from .two_stage import TwoStageDetector
3
-
4
-
5
- @DETECTORS.register_module()
6
- class MaskScoringRCNN(TwoStageDetector):
7
- """Mask Scoring RCNN.
8
-
9
- https://arxiv.org/abs/1903.00241
10
- """
11
-
12
- def __init__(self,
13
- backbone,
14
- rpn_head,
15
- roi_head,
16
- train_cfg,
17
- test_cfg,
18
- neck=None,
19
- pretrained=None):
20
- super(MaskScoringRCNN, self).__init__(
21
- backbone=backbone,
22
- neck=neck,
23
- rpn_head=rpn_head,
24
- roi_head=roi_head,
25
- train_cfg=train_cfg,
26
- test_cfg=test_cfg,
27
- pretrained=pretrained)