parquet-converter committed on
Commit 7d7bc0e · 1 Parent(s): 967481b

Update parquet files (step 80 of 121)

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/Provider/Providers/DeepAi.py +0 -46
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Artcam 2018 Xforce Keygen Unlimited Creativity.md +0 -30
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discover the Power and Versatility of Vectric Aspire 9.514 Free Download.md +0 -36
  4. spaces/1gistliPinn/ChatGPT4/Examples/Binding Of Isaac Rebirth 1.041 Download.md +0 -11
  5. spaces/1gistliPinn/ChatGPT4/Examples/Biology Today And Tomorrow Starr Pdf Download.md +0 -6
  6. spaces/1phancelerku/anime-remove-background/Download Play Together for PC and Mac A Casual Game with a Variety of Events and Modes.md +0 -97
  7. spaces/1toTree/lora_test/ppdiffusers/pipelines/unclip/text_proj.py +0 -88
  8. spaces/AI-Hobbyist/Hoyo-RVC/i18n.py +0 -28
  9. spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_cameras.py +0 -164
  10. spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/svs/diffspeech_task.py +0 -122
  11. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/model.py +0 -913
  12. spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/runs/preprocess.py +0 -17
  13. spaces/AIhackrOrg/README/README.md +0 -10
  14. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/streaming.py +0 -135
  15. spaces/AbdoulGafar/woodsound/app.py +0 -73
  16. spaces/Adapter/CoAdapter/ldm/models/diffusion/dpm_solver/__init__.py +0 -1
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fullwindowrectangle/Factory.js +0 -13
  18. spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/README.md +0 -25
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/README.md +0 -13
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/Makefile +0 -96
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/audioldm.md +0 -51
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/dpm_discrete.md +0 -22
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ddpm_original_checkpoint_to_diffusers.py +0 -431
  24. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/faster_rcnn.py +0 -24
  25. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/options/train_options.py +0 -30
  26. spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/save.py +0 -29
  27. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/package_finder.py +0 -1029
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/abc.py +0 -33
  29. spaces/Audio-AGI/WavJourney/scripts/kill_services.py +0 -11
  30. spaces/Audio-AGI/WavJourney/share_btn.py +0 -74
  31. spaces/Awesimo/jojogan/e4e/criteria/lpips/utils.py +0 -30
  32. spaces/Bart92/RVC_HF/julius/fftconv.py +0 -183
  33. spaces/Benson/text-generation/Examples/Aqu Vamos Apk Versin Antigua.md +0 -107
  34. spaces/Benson/text-generation/Examples/Descargar El Juego Growtopia Mod Apk.md +0 -62
  35. spaces/Benson/text-generation/Examples/Descargar Facebook Lite Mod Apk 2020.md +0 -95
  36. spaces/Benson/text-generation/Examples/Descargar Fr Leyendas Hiace Mod Apk.md +0 -49
  37. spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/sflckr.py +0 -91
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/results.py +0 -760
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/packages/__init__.py +0 -0
  40. spaces/BilalSardar/Lyrics-Text_to_music/TMIDI.py +0 -0
  41. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/count.h +0 -80
  42. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/transform_reduce.h +0 -68
  43. spaces/CVPR/monoscene_lite/monoscene/app.py +0 -138
  44. spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py +0 -29
  45. spaces/ChihChiu29/mychatbot/Dockerfile +0 -24
  46. spaces/CikeyQI/meme-api/meme_generator/memes/make_friend/__init__.py +0 -50
  47. spaces/CofAI/chat.b4/g4f/Provider/Provider.py +0 -16
  48. spaces/CofAI/sd-2.1/sd-2-1.py +0 -16
  49. spaces/Cropinky/hana_hanak_houses/realesrgan/data/__init__.py +0 -10
  50. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/loss.py +0 -153
spaces/101-5/gpt4free/g4f/Provider/Providers/DeepAi.py DELETED
@@ -1,46 +0,0 @@
1
- import os
2
- import json
3
- import random
4
- import hashlib
5
- import requests
6
-
7
- from ...typing import sha256, Dict, get_type_hints
8
-
9
- url = 'https://deepai.org'
10
- model = ['gpt-3.5-turbo']
11
- supports_stream = True
12
- needs_auth = False
13
-
14
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
15
- def md5(text: str) -> str:
16
- return hashlib.md5(text.encode()).hexdigest()[::-1]
17
-
18
-
19
- def get_api_key(user_agent: str) -> str:
20
- part1 = str(random.randint(0, 10**11))
21
- part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
22
-
23
- return f"tryit-{part1}-{part2}"
24
-
25
- user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
26
-
27
- headers = {
28
- "api-key": get_api_key(user_agent),
29
- "user-agent": user_agent
30
- }
31
-
32
- files = {
33
- "chat_style": (None, "chat"),
34
- "chatHistory": (None, json.dumps(messages))
35
- }
36
-
37
- r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
38
-
39
- for chunk in r.iter_content(chunk_size=None):
40
- r.raise_for_status()
41
- yield chunk.decode()
42
-
43
-
44
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
45
- '(%s)' % ', '.join(
46
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Artcam 2018 Xforce Keygen Unlimited Creativity.md DELETED
@@ -1,30 +0,0 @@
1
-
2
- # How to Install Artcam 2018 with Xforce Keygen
3
-
4
- Artcam 2018 is a powerful software for designing and machining 2D and 3D models. It is widely used by hobbyists, professionals, and educators in various fields such as woodworking, jewelry making, sign making, and engraving. However, installing Artcam 2018 can be tricky if you don't have a valid license or activation code. In this article, we will show you how to install Artcam 2018 with Xforce keygen, a tool that can generate serial numbers and activation codes for any Autodesk product.
5
-
6
- ## What is Xforce Keygen?
7
-
8
- Xforce keygen is a software that can create serial numbers and activation codes for any Autodesk product, including Artcam 2018. It is also known as a crack or a patch, as it bypasses the official registration process and allows you to use the software for free. However, using Xforce keygen is illegal and risky, as it may contain viruses or malware that can harm your computer or compromise your personal data. Therefore, we do not recommend using Xforce keygen or any other similar tool to install Artcam 2018 or any other software.
9
-
10
- ## How to Install Artcam 2018 with Xforce Keygen?
11
-
12
- If you still want to install Artcam 2018 with Xforce keygen, you will need to follow these steps:
13
-
14
- 1. Download Artcam 2018 from the official website or a trusted source. You will need to create an account and provide some information to download the software.
15
- 2. Download Xforce keygen from a reliable source. You can search online for the latest version of Xforce keygen that supports Artcam 2018. Make sure to scan the file with an antivirus program before opening it.
16
- 3. Disable your internet connection and antivirus program. This is necessary to prevent the software from detecting and blocking Xforce keygen.
17
- 4. Run the Artcam 2018 setup file and follow the instructions. When prompted to enter a serial number and a product key, click on "I have an activation code from Autodesk".
18
- 5. Run Xforce keygen as administrator and select "Artcam 2018" from the drop-down menu. Click on "Generate" to create a serial number and a product key. Copy and paste them into the corresponding fields in the Artcam 2018 installation window.
19
- 6. Click on "Next" and then on "Request an activation code using an offline method". Copy the request code that appears in the installation window.
20
- 7. Go back to Xforce keygen and click on "Patch". A message will appear saying that the patching process was successful.
21
- 8. Click on "Activate" and paste the request code into the field. Click on "Generate" to create an activation code. Copy and paste it into the field in the Artcam 2018 installation window.
22
- 9. Click on "Next" and then on "Finish" to complete the installation process.
23
- 10. Restart your computer and enjoy using Artcam 2018.
24
-
25
- ## Conclusion
26
-
27
- Artcam 2018 is a great software for creating and machining 2D and 3D models, but it requires a valid license or activation code to use it. If you don't have one, you can try to install Artcam 2018 with Xforce keygen, a tool that can generate serial numbers and activation codes for any Autodesk product. However, this method is illegal and risky, as it may expose you to viruses or malware that can damage your computer or steal your personal data. Therefore, we advise you to purchase a legitimate license or activation code from the official website or an authorized dealer instead of using Xforce keygen or any other similar tool.</p>
28
- <h2>artcam 2018 crack xforce download</h2><br /><p><b><b>DOWNLOAD</b> &#128504; <a href="https://byltly.com/2uKwNN">https://byltly.com/2uKwNN</a></b></p><br /><br /> ddb901b051<br />
29
- <br />
30
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discover the Power and Versatility of Vectric Aspire 9.514 Free Download.md DELETED
@@ -1,36 +0,0 @@
1
- <br />
2
- <h1>Vectric Aspire 9.514 Free Download: A Powerful Software for 3D Modeling and CNC Machining</h1>
3
- <p>Vectric Aspire 9.514 is a software that allows you to create detailed 3D models, draw, cut and route for production design and woodworking. It is widely used by architects, woodworkers, hobbyists and makers to design and cut decorative panels, doors, signs, furniture and more.</p>
4
- <p>In this article, we will show you how to download Vectric Aspire 9.514 free trial and what are the main features and benefits of this software.</p>
5
- <h2>vectric aspire 9.514 free download</h2><br /><p><b><b>Download File</b> &#9733;&#9733;&#9733;&#9733;&#9733; <a href="https://byltly.com/2uKyM6">https://byltly.com/2uKyM6</a></b></p><br /><br />
6
- <h2>How to Download Vectric Aspire 9.514 Free Trial</h2>
7
- <p>Vectric offers a free trial version of Aspire that you can download from their official website. The free trial is not time-limited, nor does it require you to sign up with your personal details. You can test all the elements of the software and cut some free projects on your CNC machine to check the compatibility.</p>
8
- <p>To download the free trial, you need to follow these steps:</p>
9
- <ol>
10
- <li>Go to <a href="https://www.vectric.com/free-trial/aspire">https://www.vectric.com/free-trial/aspire</a> and click on the "Download Free Trial" button.</li>
11
- <li>Fill in the form with your name, email address and country.</li>
12
- <li>Choose your preferred language and measurement units.</li>
13
- <li>Click on the "Download Now" button and save the file on your computer.</li>
14
- <li>Run the installer and follow the instructions to complete the installation.</li>
15
- </ol>
16
- <p>Once you have installed the software, you can launch it and start exploring its features. You can also watch some video tutorials and download some free projects from the Vectric website to help you get started.</p>
17
- <h2>What are the Main Features and Benefits of Vectric Aspire 9.514</h2>
18
- <p>Vectric Aspire 9.514 is a powerful software that combines 2D drawing and 3D modeling tools with CNC machining capabilities. Here are some of the main features and benefits of this software:</p>
19
- <ul>
20
- <li>You can create 2D vectors or import them from other software such as SketchUp, AutoCAD, Illustrator, etc.</li>
21
- <li>You can create 3D models from 2D vectors using various methods such as extrusion, sweeping, lofting, etc.</li>
22
- <li>You can edit, sculpt and blend 3D models using various tools such as smoothing, carving, distortion, etc.</li>
23
- <li>You can import 3D models from other software such as STL, OBJ, DXF, etc.</li>
24
- <li>You can create 3D textures from image files or use the built-in library of textures.</li>
25
- <li>You can generate high-quality toolpaths for roughing and finishing operations using various strategies such as pocketing, profiling, drilling, etc.</li>
26
- <li>You can preview and simulate the toolpaths before cutting them on your CNC machine.</li>
27
- <li>You can export 2D or 3D models as clip-art or files for other software such as PDF, SVG, EPS, etc.</li>
28
- </ul>
29
- <p>Vectric Aspire 9.514 is a versatile and user-friendly software that can help you create stunning 3D projects for your CNC machine. Whether you are a professional or a hobbyist, you can use this software to design and cut anything from signs and logos to furniture and sculptures.</p>
30
- <p></p>
31
- <h2>Conclusion</h2>
32
- <p>Vectric Aspire 9.514 is a software that allows you to create detailed 3D models, draw, cut and route for production design and woodworking. It is widely used by architects, woodworkers, hobbyists and makers to design and cut decorative panels, doors, signs, furniture and more.</p>
33
- <p>You can download a free trial version of Aspire from the Vectric website and test all the features without any time limit or sign-up requirement. You can also watch some video tutorials and download some free projects to help you get started.</p>
34
- <p>If you are looking for a powerful software for 3D modeling and CNC machining, you should give Vectric Aspire 9.514 a try. You will be amazed by what you can create with this software.</p> ddb901b051<br />
35
- <br />
36
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Binding Of Isaac Rebirth 1.041 Download.md DELETED
@@ -1,11 +0,0 @@
1
- <br />
2
- <p>binding of isaac rebirth 1.041 update is now available. binding of isaac rebirth 1.041 is available on this site.041 is very popular file these days. you can download binding of isaac rebirth 1.041 directly on your mac.041 free on your windows os. it was released on 2018 year. you can also download booting in labyrinth 2 from here. here is the full list of features of binding of isaac rebirth 1.041:</p>
3
- <h2>Binding Of Isaac Rebirth 1.041 Download</h2><br /><p><b><b>DOWNLOAD</b> ===== <a href="https://imgfil.com/2uy1BE">https://imgfil.com/2uy1BE</a></b></p><br /><br />
4
- <p>binding of isaac rebirth 1.041 is a platformer. binding of isaac rebirth 1.041 game size is 2.59 mb.041 is developed by klei entertainment.041 published by team meat on 02/01/2017.041 updated on 02/01/2017.</p>
5
- <p>binding of isaac rebirth 1.041 is very popular game all over the world. if you want to play this game, you can download binding of isaac rebirth 1.041 directly on your pc or mac. it was released on 02/01/2017 and has been downloaded from our website more than 2.500.000 times. you can also play other games on our website.</p>
6
- <p>binding of isaac rebirth 1.041 is available for pc users in the version 1.041. mac users can download binding of isaac rebirth 1.041 directly on their pc. binding of isaac rebirth 1.041 for linux users is not supported yet.</p>
7
- <p></p>
8
- <p>binding of isaac rebirth 1.041 patch is now available to download. the latest update has been published on jan 17, 2014 and all the previous changes that were present in 1.041 are present in 1.042 patch as well. the new update brings with it new improvements and bug fixes. check out the details below.</p>
9
- <p>binding of isaac rebirth 1.041 patch is available on the moddb.com and moddb.com. if you have already downloaded the update from the link above then you don’t need to download it again. otherwise, download binding of isaac rebirth 1.041 patch from the link given above and follow the instructions to install it on your pc. you can also check the video tutorial below to know the steps to install it on your pc.</p> 899543212b<br />
10
- <br />
11
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Biology Today And Tomorrow Starr Pdf Download.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>biology today and tomorrow starr pdf download</h2><br /><p><b><b>DOWNLOAD</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://imgfil.com/2uy1UU">https://imgfil.com/2uy1UU</a></b></p><br /><br />
2
- <br />
3
- Biology: A Human Emphasis by Cecie Starr in EPUB, FB2, FB3 download ... starr cecie Biology Today and Tomorrow without Physiology free PDF Biology Today ... 4d29de3e1b<br />
4
- <br />
5
- <br />
6
- <p></p>
 
spaces/1phancelerku/anime-remove-background/Download Play Together for PC and Mac A Casual Game with a Variety of Events and Modes.md DELETED
@@ -1,97 +0,0 @@
1
- <br />
2
- <h1>How to Play Play Together APK on PC</h1>
3
- <p>Do you love playing casual games that let you socialize with your friends online? Do you want to experience a virtual world where you can customize your character and enjoy various activities? Do you wish to play a game that supports multiple platforms and devices? If you answered yes to any of these questions, then you should try Play Together APK, a game that offers all these features and more. In this article, we will show you how to play Play Together APK on PC with BlueStacks, the best Android emulator for gaming. We will also give you some tips and tricks to help you get started with the game.</p>
4
- <h2>What is Play Together APK?</h2>
5
- <p>Play Together APK is a casual game developed by Haegin Co., Ltd. It is a game that welcomes you to its virtual world, where you are free to perform several activities and events with your friends online. You can catch up with old friends or make new ones from all over the world, the more, the merrier!</p>
6
- <h2>play together apk pc</h2><br /><p><b><b>Download</b> &#10042; <a href="https://jinyurl.com/2uNMqH">https://jinyurl.com/2uNMqH</a></b></p><br /><br />
7
- <h3>A casual game that lets you socialize with friends online</h3>
8
- <p>Play Together APK is a game that focuses on social interaction and communication. You can chat with other players, send them gifts, invite them to your home, or join them in various mini-games. You can also join clubs, parties, festivals, and other events that happen regularly in the game. You can even get married or adopt a pet if you want!</p>
9
- <h3>A virtual world where you can customize your character and enjoy various activities</h3>
10
- <p>Play Together APK is a game that gives you the freedom to create your own character and style. You can choose from different costumes, hairstyles, accessories, and more. You can also decorate your home with furniture, wallpapers, plants, and other items. You can even drive cars, ride bikes, or fly planes around the island of Kaia, where the game takes place.</p>
11
- <h3>A game that supports multiple platforms and devices</h3>
12
- <p>Play Together APK is a game that can be played on various devices, such as Android phones, tablets, PCs, or Macs. You can also play it on your browser without downloading anything. The game supports cross-play, which means you can play with anyone regardless of their device or platform.</p>
13
- <h2>Why play Play Together APK on PC?</h2>
14
- <p>While Play Together APK is a great game to play on mobile devices, playing it on PC has its own advantages. Here are some of them:</p>
15
- <h3>Enjoy a larger screen and better graphics</h3>
16
- <p>Playing Play Together APK on PC allows you to see the game in full HD resolution and enjoy its colorful graphics and animations. You can also adjust the settings to suit your preferences and optimize the performance of the game. You will be able to appreciate the details of the characters, the environment, and the items more clearly.</p>
17
- <h3>Use keyboard and mouse controls for easier gameplay</h3>
18
- <p>Playing Play Together APK on PC gives you the option to use keyboard and mouse controls instead of touch controls for easier gameplay. You can also customize the key mapping and assign different functions to different keys. This way, you can move around, interact, and chat with other players more smoothly and conveniently.</p>
19
- <p>play together game download for pc<br />
20
- play together app on pc<br />
21
- how to play together on pc<br />
22
- play together emulator for pc<br />
23
- play together online on pc<br />
24
- play together pc version<br />
25
- play together pc requirements<br />
26
- play together pc gameplay<br />
27
- play together pc free download<br />
28
- play together pc bluestacks<br />
29
- play together pc windows 10<br />
30
- play together pc mod apk<br />
31
- play together pc hack<br />
32
- play together pc cheat<br />
33
- play together pc guide<br />
34
- play together pc tips and tricks<br />
35
- play together pc review<br />
36
- play together pc update<br />
37
- play together pc best settings<br />
38
- play together pc keyboard controls<br />
39
- play together pc graphics settings<br />
40
- play together pc lag fix<br />
41
- play together pc system requirements<br />
42
- play together pc nox player<br />
43
- play together pc memu emulator<br />
44
- play together pc ldplayer<br />
45
- play together pc gameloop<br />
46
- play together pc apk pure<br />
47
- play together pc uptodown<br />
48
- play together pc apk download<br />
49
- play together vng on pc<br />
50
- how to install play together on pc<br />
51
- how to run play together on pc<br />
52
- how to get play together on pc<br />
53
- how to download and install play together on your windows 10/8/7/vista/xp and mac computer using bluestacks android emulator.<br />
54
- how to use bluestacks to play android games like play together on your laptop or desktop computer.<br />
55
- how to enjoy the virtual world of play together with your friends from all over the world on your computer screen.<br />
56
- how to customize your character and explore various mini-games in play together on your pc.<br />
57
- how to drive cars, dance, camp, fish, and more in play together on your pc.<br />
58
- how to take care of your pets and decorate your home in play together on your pc.</p>
59
- <h3>Access more features and enhancements with BlueStacks</h3>
60
- <p>Playing Play Together APK on PC with BlueStacks gives you access to more features and enhancements that can improve your gaming experience. For example, you can use the Multi-Instance feature to play multiple accounts at the same time, or the Macro feature to automate repetitive tasks. You can also use the Screen Recorder feature to capture your gameplay and share it with others, or the Real-time Translation feature to communicate with players from different countries.</p>
61
- <h2>How to download and install Play Together APK on PC?</h2>
62
- <p>Downloading and installing Play Together APK on PC is easy and fast with BlueStacks. Here are the steps you need to follow:</p>
63
- <h3>Download and install BlueStacks on your PC or Mac</h3>
64
- <p>BlueStacks is the best Android emulator for gaming, and you can download it for free from its official website. Just click on the download button and follow the instructions to install it on your PC or Mac. It will only take a few minutes, and you will be ready to play.</p>
65
- <h3>Sign in to Google and search for Play Together APK in the Play Store</h3>
66
- <p>Once you have installed BlueStacks, launch it and sign in to your Google account. This will allow you to access the Google Play Store, where you can find Play Together APK. Just type the name of the game in the search bar and click on the install button. The game will be downloaded and installed automatically on your PC or Mac.</p>
67
- <h3>Install the game and launch it from the home screen</h3>
68
- <p>After the installation is complete, you can launch Play Together APK from the home screen of BlueStacks. You will see the game icon among other apps that you have installed. Just click on it and start playing. You can also create a shortcut on your desktop for easier access.</p>
69
- <h2>Tips and tricks for playing Play Together APK on PC</h2>
70
- <p>Now that you know how to play Play Together APK on PC, here are some tips and tricks that can help you enjoy the game more:</p>
71
- <h3>Explore the island of Kaia and discover its secrets</h3>
72
- <p>The island of Kaia is where Play Together APK takes place, and it is full of surprises and secrets. You can explore different areas, such as the beach, the forest, the city, and more. You can also find hidden items, quests, and events that can reward you with coins, gems, or other prizes. You never know what you might find, so be curious and adventurous.</p>
73
- <h3>Join the Game Party mode and compete with other players</h3>
74
- <p>If you want to have some fun and challenge yourself, you can join the Game Party mode in Play Together APK. This is a mode where you can play various mini-games with other players, such as racing, fishing, cooking, dancing, and more. You can earn points by winning or participating in these games, and exchange them for rewards or rankings. You can also chat with other players and make new friends.</p>
75
- <h3>Customize your character and your home with various outfits and items</h3>
76
- <p>One of the best features of Play Together APK is that you can customize your character and your home with various outfits and items. You can buy or earn different costumes, hairstyles, accessories, and more from shops or events. You can also decorate your home with furniture, wallpapers, plants, and other items from the market or gifts from friends. You can express your personality and style with these options.</p>
77
- <h2>Conclusion</h2>
78
- <p>Play Together APK is a fun and relaxing game that you can enjoy with your friends online. You can socialize with other players, perform various activities, and explore a virtual world. Playing it on PC with BlueStacks gives you more advantages and convenience. You can enjoy a larger screen, better graphics, keyboard and mouse controls, and more features and enhancements. Download Play Together APK on PC today and start your adventure!</p>
79
- <h2>Frequently Asked Questions</h2>
80
- <ul>
81
- <li><b>Is Play Together APK free to play?</b></li>
82
- <p>Yes, Play Together APK is free to play. However, it also offers in-app purchases that can enhance your gameplay or unlock more items.</p>
83
- <li><b>Can I play Play Together APK offline?</b></li>
84
- <p>No, Play Together APK requires an internet connection to play. You need to be online to access the game servers and interact with other players.</p>
85
- <li><b>Can I play Play Together APK with my friends?</b></li>
86
- <p>Yes Yes, you can play Play Together APK with your friends. You can invite them to join you in the game, or add them as friends from the game menu. You can also join clubs, parties, festivals, and other events with your friends.</p>
87
- <li><b>How can I get more coins and gems in Play Together APK?</b></li>
88
- <p>There are several ways to get more coins and gems in Play Together APK. You can earn them by playing mini-games, completing quests, participating in events, or watching ads. You can also buy them with real money from the game store.</p>
89
- <li><b>How can I contact the game developers or report a problem?</b></li>
90
- <p>If you have any questions, suggestions, or issues with Play Together APK, you can contact the game developers or report a problem through the following channels:</p>
91
- <ul>
92
- <li>Email: [email protected]</li>
93
- <li>Facebook: https://www.facebook.com/PlayTogetherGame</li>
94
- <li>Discord: https://discord.gg/playtogether</li>
95
- </ul></p> 401be4b1e0<br />
96
- <br />
97
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/unclip/text_proj.py DELETED
@@ -1,88 +0,0 @@
1
- # Copyright 2022 Kakao Brain and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import paddle
16
- from paddle import nn
17
-
18
- from ...configuration_utils import ConfigMixin, register_to_config
19
- from ...modeling_utils import ModelMixin
20
-
21
-
22
- class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
23
- """
24
- Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
25
- decoder.
26
-
27
- For more details, see the original paper: https://arxiv.org/abs/2204.06125 section 2.1
28
- """
29
-
30
- @register_to_config
31
- def __init__(
32
- self,
33
- *,
34
- clip_extra_context_tokens: int = 4,
35
- clip_embeddings_dim: int = 768,
36
- time_embed_dim: int,
37
- cross_attention_dim,
38
- ):
39
- super().__init__()
40
-
41
- self.learned_classifier_free_guidance_embeddings = self.create_parameter(
42
- (clip_embeddings_dim,), dtype=paddle.get_default_dtype(), default_initializer=nn.initializer.Constant(0.0)
43
- )
44
-
45
- # parameters for additional clip time embeddings
46
- self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
47
- self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
48
-
49
- # parameters for encoder hidden states
50
- self.clip_extra_context_tokens = clip_extra_context_tokens
51
- self.clip_extra_context_tokens_proj = nn.Linear(
52
- clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
53
- )
54
- self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
55
- self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
56
-
57
- def forward(self, *, image_embeddings, text_embeddings, text_encoder_hidden_states, do_classifier_free_guidance):
58
- if do_classifier_free_guidance:
59
- # Add the classifier free guidance embeddings to the image embeddings
60
- image_embeddings_batch_size = image_embeddings.shape[0]
61
- classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
62
- classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
63
- [image_embeddings_batch_size, -1]
64
- )
65
- image_embeddings = paddle.concat([classifier_free_guidance_embeddings, image_embeddings], axis=0)
66
-
67
- # The image embeddings batch size and the text embeddings batch size are equal
68
- assert image_embeddings.shape[0] == text_embeddings.shape[0]
69
-
70
- batch_size = text_embeddings.shape[0]
71
-
72
- # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
73
- # adding CLIP embeddings to the existing timestep embedding, ...
74
- time_projected_text_embeddings = self.embedding_proj(text_embeddings)
75
- time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
76
- additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_text_embeddings
77
-
78
- # ... and by projecting CLIP embeddings into four
79
- # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
80
- clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
81
- clip_extra_context_tokens = clip_extra_context_tokens.reshape([batch_size, -1, self.clip_extra_context_tokens])
82
-
83
- text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
84
- text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
85
- text_encoder_hidden_states = text_encoder_hidden_states.transpose([0, 2, 1])
86
- text_encoder_hidden_states = paddle.concat([clip_extra_context_tokens, text_encoder_hidden_states], axis=2)
87
-
88
- return text_encoder_hidden_states, additive_clip_time_embeddings
 
spaces/AI-Hobbyist/Hoyo-RVC/i18n.py DELETED
@@ -1,28 +0,0 @@
1
- import locale
2
- import json
3
- import os
4
-
5
-
6
- def load_language_list(language):
7
- with open(f"./i18n/{language}.json", "r", encoding="utf-8") as f:
8
- language_list = json.load(f)
9
- return language_list
10
-
11
-
12
- class I18nAuto:
13
- def __init__(self, language=None):
14
- if language in ["Auto", None]:
15
- language = locale.getdefaultlocale()[
16
- 0
17
- ] # getlocale can't identify the system's language ((None, None))
18
- if not os.path.exists(f"./i18n/{language}.json"):
19
- language = "en_US"
20
- self.language = language
21
- # print("Use Language:", language)
22
- self.language_map = load_language_list(language)
23
-
24
- def __call__(self, key):
25
- return self.language_map.get(key, key)
26
-
27
- def print(self):
28
- print("Use Language:", self.language)
 
spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_cameras.py DELETED
@@ -1,164 +0,0 @@
1
- import numpy as np
2
- import pytest
3
-
4
- from pyrender import PerspectiveCamera, OrthographicCamera
5
-
6
-
7
- def test_perspective_camera():
8
-
9
- # Set up constants
10
- znear = 0.05
11
- zfar = 100
12
- yfov = np.pi / 3.0
13
- width = 1000.0
14
- height = 500.0
15
- aspectRatio = 640.0 / 480.0
16
-
17
- # Test basics
18
- with pytest.raises(TypeError):
19
- p = PerspectiveCamera()
20
-
21
- p = PerspectiveCamera(yfov=yfov)
22
- assert p.yfov == yfov
23
- assert p.znear == 0.05
24
- assert p.zfar is None
25
- assert p.aspectRatio is None
26
- p.name = 'asdf'
27
- p.name = None
28
-
29
- with pytest.raises(ValueError):
30
- p.yfov = 0.0
31
-
32
- with pytest.raises(ValueError):
33
- p.yfov = -1.0
34
-
35
- with pytest.raises(ValueError):
36
- p.znear = -1.0
37
-
38
- p.znear = 0.0
39
- p.znear = 0.05
40
- p.zfar = 100.0
41
- assert p.zfar == 100.0
42
-
43
- with pytest.raises(ValueError):
44
- p.zfar = 0.03
45
-
46
- with pytest.raises(ValueError):
47
- p.zfar = 0.05
48
-
49
- p.aspectRatio = 10.0
50
- assert p.aspectRatio == 10.0
51
-
52
- with pytest.raises(ValueError):
53
- p.aspectRatio = 0.0
54
-
55
- with pytest.raises(ValueError):
56
- p.aspectRatio = -1.0
57
-
58
- # Test matrix getting/setting
59
-
60
- # NF
61
- p.znear = 0.05
62
- p.zfar = 100
63
- p.aspectRatio = None
64
-
65
- with pytest.raises(ValueError):
66
- p.get_projection_matrix()
67
-
68
- assert np.allclose(
69
- p.get_projection_matrix(width, height),
70
- np.array([
71
- [1.0 / (width / height * np.tan(yfov / 2.0)), 0.0, 0.0, 0.0],
72
- [0.0, 1.0 / np.tan(yfov / 2.0), 0.0, 0.0],
73
- [0.0, 0.0, (zfar + znear) / (znear - zfar),
74
- (2 * zfar * znear) / (znear - zfar)],
75
- [0.0, 0.0, -1.0, 0.0]
76
- ])
77
- )
78
-
79
- # NFA
80
- p.aspectRatio = aspectRatio
81
- assert np.allclose(
82
- p.get_projection_matrix(width, height),
83
- np.array([
84
- [1.0 / (aspectRatio * np.tan(yfov / 2.0)), 0.0, 0.0, 0.0],
85
- [0.0, 1.0 / np.tan(yfov / 2.0), 0.0, 0.0],
86
- [0.0, 0.0, (zfar + znear) / (znear - zfar),
87
- (2 * zfar * znear) / (znear - zfar)],
88
- [0.0, 0.0, -1.0, 0.0]
89
- ])
90
- )
91
- assert np.allclose(
92
- p.get_projection_matrix(), p.get_projection_matrix(width, height)
93
- )
94
-
95
- # N
96
- p.zfar = None
97
- p.aspectRatio = None
98
- assert np.allclose(
99
- p.get_projection_matrix(width, height),
100
- np.array([
101
- [1.0 / (width / height * np.tan(yfov / 2.0)), 0.0, 0.0, 0.0],
102
- [0.0, 1.0 / np.tan(yfov / 2.0), 0.0, 0.0],
103
- [0.0, 0.0, -1.0, -2.0 * znear],
104
- [0.0, 0.0, -1.0, 0.0]
105
- ])
106
- )
107
-
108
-
109
- def test_orthographic_camera():
110
- xm = 1.0
111
- ym = 2.0
112
- n = 0.05
113
- f = 100.0
114
-
115
- with pytest.raises(TypeError):
116
- c = OrthographicCamera()
117
-
118
- c = OrthographicCamera(xmag=xm, ymag=ym)
119
-
120
- assert c.xmag == xm
121
- assert c.ymag == ym
122
- assert c.znear == 0.05
123
- assert c.zfar == 100.0
124
- assert c.name is None
125
-
126
- with pytest.raises(TypeError):
127
- c.ymag = None
128
-
129
- with pytest.raises(ValueError):
130
- c.ymag = 0.0
131
-
132
- with pytest.raises(ValueError):
133
- c.ymag = -1.0
134
-
135
- with pytest.raises(TypeError):
136
- c.xmag = None
137
-
138
- with pytest.raises(ValueError):
139
- c.xmag = 0.0
140
-
141
- with pytest.raises(ValueError):
142
- c.xmag = -1.0
143
-
144
- with pytest.raises(TypeError):
145
- c.znear = None
146
-
147
- with pytest.raises(ValueError):
148
- c.znear = 0.0
149
-
150
- with pytest.raises(ValueError):
151
- c.znear = -1.0
152
-
153
- with pytest.raises(ValueError):
154
- c.zfar = 0.01
155
-
156
- assert np.allclose(
157
- c.get_projection_matrix(),
158
- np.array([
159
- [1.0 / xm, 0, 0, 0],
160
- [0, 1.0 / ym, 0, 0],
161
- [0, 0, 2.0 / (n - f), (f + n) / (n - f)],
162
- [0, 0, 0, 1.0]
163
- ])
164
- )
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/svs/diffspeech_task.py DELETED
@@ -1,122 +0,0 @@
1
- import torch
2
-
3
- import utils
4
- from utils.hparams import hparams
5
- from modules.diff.net import DiffNet
6
- from modules.diff.shallow_diffusion_tts import GaussianDiffusion
7
- from tasks.svs.task import DiffFsTask
8
- from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder
9
- from utils.pitch_utils import denorm_f0
10
- from tasks.tts.fs2_utils import FastSpeechDataset
11
-
12
- DIFF_DECODERS = {
13
- 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']),
14
- }
15
-
16
-
17
- class DiffSpeechTask(DiffFsTask):
18
- def __init__(self):
19
- super(DiffSpeechTask, self).__init__()
20
- self.dataset_cls = FastSpeechDataset
21
- self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()
22
-
23
- def build_tts_model(self):
24
- mel_bins = hparams['audio_num_mel_bins']
25
- self.model = GaussianDiffusion(
26
- phone_encoder=self.phone_encoder,
27
- out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
28
- timesteps=hparams['timesteps'],
29
- K_step=hparams['K_step'],
30
- loss_type=hparams['diff_loss_type'],
31
- spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
32
- )
33
- if hparams['fs2_ckpt'] != '':
34
- utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)
35
- # self.model.fs2.decoder = None
36
- for k, v in self.model.fs2.named_parameters():
37
- if not 'predictor' in k:
38
- v.requires_grad = False
39
-
40
- def build_optimizer(self, model):
41
- self.optimizer = optimizer = torch.optim.AdamW(
42
- filter(lambda p: p.requires_grad, model.parameters()),
43
- lr=hparams['lr'],
44
- betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
45
- weight_decay=hparams['weight_decay'])
46
- return optimizer
47
-
48
- def run_model(self, model, sample, return_output=False, infer=False):
49
- txt_tokens = sample['txt_tokens'] # [B, T_t]
50
- target = sample['mels'] # [B, T_s, 80]
51
- # mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s]
52
- mel2ph = sample['mel2ph']
53
- f0 = sample['f0']
54
- uv = sample['uv']
55
- energy = sample['energy']
56
- # fs2_mel = sample['fs2_mels']
57
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
58
- if hparams['pitch_type'] == 'cwt':
59
- cwt_spec = sample[f'cwt_spec']
60
- f0_mean = sample['f0_mean']
61
- f0_std = sample['f0_std']
62
- sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
63
-
64
- output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
65
- ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer)
66
-
67
- losses = {}
68
- if 'diff_loss' in output:
69
- losses['mel'] = output['diff_loss']
70
- self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
71
- if hparams['use_pitch_embed']:
72
- self.add_pitch_loss(output, sample, losses)
73
- if hparams['use_energy_embed']:
74
- self.add_energy_loss(output['energy_pred'], energy, losses)
75
- if not return_output:
76
- return losses
77
- else:
78
- return losses, output
79
-
80
- def validation_step(self, sample, batch_idx):
81
- outputs = {}
82
- txt_tokens = sample['txt_tokens'] # [B, T_t]
83
-
84
- energy = sample['energy']
85
- spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
86
- mel2ph = sample['mel2ph']
87
- f0 = sample['f0']
88
- uv = sample['uv']
89
-
90
- outputs['losses'] = {}
91
-
92
- outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
93
-
94
-
95
- outputs['total_loss'] = sum(outputs['losses'].values())
96
- outputs['nsamples'] = sample['nsamples']
97
- outputs = utils.tensors_to_scalars(outputs)
98
- if batch_idx < hparams['num_valid_plots']:
99
- # model_out = self.model(
100
- # txt_tokens, spk_embed=spk_embed, mel2ph=None, f0=None, uv=None, energy=None, ref_mels=None, infer=True)
101
- # self.plot_mel(batch_idx, model_out['mel_out'], model_out['fs2_mel'], name=f'diffspeech_vs_fs2_{batch_idx}')
102
- model_out = self.model(
103
- txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True)
104
- gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
105
- self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=model_out.get('f0_denorm'))
106
- self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'])
107
- return outputs
108
-
109
- ############
110
- # validation plots
111
- ############
112
- def plot_wav(self, batch_idx, gt_wav, wav_out, is_mel=False, gt_f0=None, f0=None, name=None):
113
- gt_wav = gt_wav[0].cpu().numpy()
114
- wav_out = wav_out[0].cpu().numpy()
115
- gt_f0 = gt_f0[0].cpu().numpy()
116
- f0 = f0[0].cpu().numpy()
117
- if is_mel:
118
- gt_wav = self.vocoder.spec2wav(gt_wav, f0=gt_f0)
119
- wav_out = self.vocoder.spec2wav(wav_out, f0=f0)
120
- self.logger.experiment.add_audio(f'gt_{batch_idx}', gt_wav, sample_rate=hparams['audio_sample_rate'], global_step=self.global_step)
121
- self.logger.experiment.add_audio(f'wav_{batch_idx}', wav_out, sample_rate=hparams['audio_sample_rate'], global_step=self.global_step)
122
-
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/model.py DELETED
@@ -1,913 +0,0 @@
1
- """ CLAP Model
2
-
3
- Adapted from CLIP: https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
- Adapted to the Audio Task.
5
- """
6
-
7
- from collections import OrderedDict
8
- from dataclasses import dataclass
9
- from email.mime import audio
10
- from typing import Tuple, Union, Callable, Optional
11
-
12
- import numpy as np
13
- import torch
14
- import torch.nn.functional as F
15
- from torch import nn
16
-
17
- from .timm_model import TimmModel
18
- import logging
19
- from .utils import freeze_batch_norm_2d
20
-
21
- from .pann_model import create_pann_model
22
- from .htsat import create_htsat_model
23
- from transformers import BertModel, RobertaModel, BartModel
24
- from transformers.tokenization_utils_base import BatchEncoding
25
-
26
-
27
- class MLPLayers(nn.Module):
28
- def __init__(self, units=[512, 512, 512], nonlin=nn.ReLU(), dropout=0.1):
29
- super(MLPLayers, self).__init__()
30
- self.nonlin = nonlin
31
- self.dropout = dropout
32
-
33
- sequence = []
34
- for u0, u1 in zip(units[:-1], units[1:]):
35
- sequence.append(nn.Linear(u0, u1))
36
- sequence.append(self.nonlin)
37
- sequence.append(nn.Dropout(self.dropout))
38
- sequence = sequence[:-2]
39
-
40
- self.sequential = nn.Sequential(*sequence)
41
-
42
- def forward(self, X):
43
- X = self.sequential(X)
44
- return X
45
-
46
-
47
- class Bottleneck(nn.Module):
48
- expansion = 4
49
-
50
- def __init__(self, inplanes, planes, stride=1):
51
- super().__init__()
52
-
53
- # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
54
- self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
55
- self.bn1 = nn.BatchNorm2d(planes)
56
-
57
- self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
58
- self.bn2 = nn.BatchNorm2d(planes)
59
-
60
- self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
61
-
62
- self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
63
- self.bn3 = nn.BatchNorm2d(planes * self.expansion)
64
-
65
- self.relu = nn.ReLU(inplace=True)
66
- self.downsample = None
67
- self.stride = stride
68
-
69
- if stride > 1 or inplanes != planes * Bottleneck.expansion:
70
- # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
71
- self.downsample = nn.Sequential(
72
- OrderedDict(
73
- [
74
- ("-1", nn.AvgPool2d(stride)),
75
- (
76
- "0",
77
- nn.Conv2d(
78
- inplanes,
79
- planes * self.expansion,
80
- 1,
81
- stride=1,
82
- bias=False,
83
- ),
84
- ),
85
- ("1", nn.BatchNorm2d(planes * self.expansion)),
86
- ]
87
- )
88
- )
89
-
90
- def forward(self, x: torch.Tensor):
91
- identity = x
92
-
93
- out = self.relu(self.bn1(self.conv1(x)))
94
- out = self.relu(self.bn2(self.conv2(out)))
95
- out = self.avgpool(out)
96
- out = self.bn3(self.conv3(out))
97
-
98
- if self.downsample is not None:
99
- identity = self.downsample(x)
100
-
101
- out += identity
102
- out = self.relu(out)
103
- return out
104
-
105
-
106
- class AttentionPool2d(nn.Module):
107
- def __init__(
108
- self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
109
- ):
110
- super().__init__()
111
- self.positional_embedding = nn.Parameter(
112
- torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
113
- )
114
- self.k_proj = nn.Linear(embed_dim, embed_dim)
115
- self.q_proj = nn.Linear(embed_dim, embed_dim)
116
- self.v_proj = nn.Linear(embed_dim, embed_dim)
117
- self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
118
- self.num_heads = num_heads
119
-
120
- def forward(self, x):
121
- x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
122
- 2, 0, 1
123
- ) # NCHW -> (HW)NC
124
- x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
125
- x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
126
- x, _ = F.multi_head_attention_forward(
127
- query=x,
128
- key=x,
129
- value=x,
130
- embed_dim_to_check=x.shape[-1],
131
- num_heads=self.num_heads,
132
- q_proj_weight=self.q_proj.weight,
133
- k_proj_weight=self.k_proj.weight,
134
- v_proj_weight=self.v_proj.weight,
135
- in_proj_weight=None,
136
- in_proj_bias=torch.cat(
137
- [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
138
- ),
139
- bias_k=None,
140
- bias_v=None,
141
- add_zero_attn=False,
142
- dropout_p=0,
143
- out_proj_weight=self.c_proj.weight,
144
- out_proj_bias=self.c_proj.bias,
145
- use_separate_proj_weight=True,
146
- training=self.training,
147
- need_weights=False,
148
- )
149
-
150
- return x[0]
151
-
152
-
153
- class ModifiedResNet(nn.Module):
154
- """
155
- A ResNet class that is similar to torchvision's but contains the following changes:
156
- - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
157
- - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
158
- - The final pooling layer is a QKV attention instead of an average pool
159
- """
160
-
161
- def __init__(self, layers, output_dim, heads, image_size=224, width=64):
162
- super().__init__()
163
- self.output_dim = output_dim
164
- self.image_size = image_size
165
-
166
- # the 3-layer stem
167
- self.conv1 = nn.Conv2d(
168
- 3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
169
- )
170
- self.bn1 = nn.BatchNorm2d(width // 2)
171
- self.conv2 = nn.Conv2d(
172
- width // 2, width // 2, kernel_size=3, padding=1, bias=False
173
- )
174
- self.bn2 = nn.BatchNorm2d(width // 2)
175
- self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
176
- self.bn3 = nn.BatchNorm2d(width)
177
- self.avgpool = nn.AvgPool2d(2)
178
- self.relu = nn.ReLU(inplace=True)
179
-
180
- # residual layers
181
- self._inplanes = width # this is a *mutable* variable used during construction
182
- self.layer1 = self._make_layer(width, layers[0])
183
- self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
184
- self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
185
- self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
186
-
187
- embed_dim = width * 32 # the ResNet feature dimension
188
- self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
189
-
190
- self.init_parameters()
191
-
192
- def _make_layer(self, planes, blocks, stride=1):
193
- layers = [Bottleneck(self._inplanes, planes, stride)]
194
-
195
- self._inplanes = planes * Bottleneck.expansion
196
- for _ in range(1, blocks):
197
- layers.append(Bottleneck(self._inplanes, planes))
198
-
199
- return nn.Sequential(*layers)
200
-
201
- def init_parameters(self):
202
- if self.attnpool is not None:
203
- std = self.attnpool.c_proj.in_features**-0.5
204
- nn.init.normal_(self.attnpool.q_proj.weight, std=std)
205
- nn.init.normal_(self.attnpool.k_proj.weight, std=std)
206
- nn.init.normal_(self.attnpool.v_proj.weight, std=std)
207
- nn.init.normal_(self.attnpool.c_proj.weight, std=std)
208
-
209
- for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
210
- for name, param in resnet_block.named_parameters():
211
- if name.endswith("bn3.weight"):
212
- nn.init.zeros_(param)
213
-
214
- def lock(self, unlocked_groups=0, freeze_bn_stats=False):
215
- assert (
216
- unlocked_groups == 0
217
- ), "partial locking not currently supported for this model"
218
- for param in self.parameters():
219
- param.requires_grad = False
220
- if freeze_bn_stats:
221
- freeze_batch_norm_2d(self)
222
-
223
- def stem(self, x):
224
- for conv, bn in [
225
- (self.conv1, self.bn1),
226
- (self.conv2, self.bn2),
227
- (self.conv3, self.bn3),
228
- ]:
229
- x = self.relu(bn(conv(x)))
230
- x = self.avgpool(x)
231
- return x
232
-
233
- def forward(self, x):
234
- x = self.stem(x)
235
- x = self.layer1(x)
236
- x = self.layer2(x)
237
- x = self.layer3(x)
238
- x = self.layer4(x)
239
- x = self.attnpool(x)
240
-
241
- return x
242
-
243
-
244
- class LayerNorm(nn.LayerNorm):
245
- """Subclass torch's LayerNorm to handle fp16."""
246
-
247
- def forward(self, x: torch.Tensor):
248
- orig_type = x.dtype
249
- x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
250
- return x.to(orig_type)
251
-
252
-
253
- class QuickGELU(nn.Module):
254
- # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
255
- def forward(self, x: torch.Tensor):
256
- return x * torch.sigmoid(1.702 * x)
257
-
258
-
259
- class ResidualAttentionBlock(nn.Module):
260
- def __init__(self, d_model: int, n_head: int, act_layer: Callable = nn.GELU):
261
- super().__init__()
262
-
263
- self.attn = nn.MultiheadAttention(d_model, n_head)
264
- self.ln_1 = LayerNorm(d_model)
265
- self.mlp = nn.Sequential(
266
- OrderedDict(
267
- [
268
- ("c_fc", nn.Linear(d_model, d_model * 4)),
269
- ("gelu", act_layer()),
270
- ("c_proj", nn.Linear(d_model * 4, d_model)),
271
- ]
272
- )
273
- )
274
- self.ln_2 = LayerNorm(d_model)
275
-
276
- def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
277
- return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
278
-
279
- def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
280
- x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
281
- x = x + self.mlp(self.ln_2(x))
282
- return x
283
-
284
-
285
- class Transformer(nn.Module):
286
- def __init__(
287
- self, width: int, layers: int, heads: int, act_layer: Callable = nn.GELU
288
- ):
289
- super().__init__()
290
- self.width = width
291
- self.layers = layers
292
- self.resblocks = nn.ModuleList(
293
- [
294
- ResidualAttentionBlock(width, heads, act_layer=act_layer)
295
- for _ in range(layers)
296
- ]
297
- )
298
-
299
- def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
300
- for r in self.resblocks:
301
- x = r(x, attn_mask=attn_mask)
302
- return x
303
-
304
-
305
- class VisualTransformer(nn.Module):
306
- def __init__(
307
- self,
308
- image_size: int,
309
- patch_size: int,
310
- width: int,
311
- layers: int,
312
- heads: int,
313
- output_dim: int,
314
- act_layer: Callable = nn.GELU,
315
- ):
316
- super().__init__()
317
- self.image_size = image_size
318
- self.output_dim = output_dim
319
- self.conv1 = nn.Conv2d(
320
- in_channels=3,
321
- out_channels=width,
322
- kernel_size=patch_size,
323
- stride=patch_size,
324
- bias=False,
325
- )
326
-
327
- scale = width**-0.5
328
- self.class_embedding = nn.Parameter(scale * torch.randn(width))
329
- self.positional_embedding = nn.Parameter(
330
- scale * torch.randn((image_size // patch_size) ** 2 + 1, width)
331
- )
332
- self.ln_pre = LayerNorm(width)
333
-
334
- self.text_branch = Transformer(width, layers, heads, act_layer=act_layer)
335
-
336
- self.ln_post = LayerNorm(width)
337
- self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
338
-
339
- def lock(self, unlocked_groups=0, freeze_bn_stats=False):
340
- assert (
341
- unlocked_groups == 0
342
- ), "partial locking not currently supported for this model"
343
- for param in self.parameters():
344
- param.requires_grad = False
345
-
346
- def forward(self, x: torch.Tensor):
347
- x = self.conv1(x) # shape = [*, width, grid, grid]
348
- x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
349
- x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
350
- x = torch.cat(
351
- [
352
- self.class_embedding.to(x.dtype)
353
- + torch.zeros(
354
- x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
355
- ),
356
- x,
357
- ],
358
- dim=1,
359
- ) # shape = [*, grid ** 2 + 1, width]
360
- x = x + self.positional_embedding.to(x.dtype)
361
- x = self.ln_pre(x)
362
-
363
- x = x.permute(1, 0, 2) # NLD -> LND
364
- x = self.text_branch(x)
365
- x = x.permute(1, 0, 2) # LND -> NLD
366
-
367
- x = self.ln_post(x[:, 0, :])
368
-
369
- if self.proj is not None:
370
- x = x @ self.proj
371
-
372
- return x
373
-
374
-
375
- @dataclass
376
- class CLAPVisionCfg:
377
- layers: Union[Tuple[int, int, int, int], int] = 12
378
- width: int = 768
379
- patch_size: int = 16
380
- image_size: Union[Tuple[int, int], int] = 224
381
- timm_model_name: str = (
382
- None # a valid model name overrides layers, width, patch_size
383
- )
384
- timm_model_pretrained: bool = (
385
- False # use (imagenet) pretrained weights for named model
386
- )
387
- timm_pool: str = (
388
- "avg" # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
389
- )
390
- timm_proj: str = (
391
- "linear" # linear projection for timm model output ('linear', 'mlp', '')
392
- )
393
-
394
-
395
- # Audio Config Class
396
- @dataclass
397
- class CLAPAudioCfp:
398
- model_type: str = "PANN"
399
- model_name: str = "Cnn14"
400
- sample_rate: int = 48000
401
- # Param
402
- audio_length: int = 1024
403
- window_size: int = 1024
404
- hop_size: int = 1024
405
- fmin: int = 50
406
- fmax: int = 14000
407
- class_num: int = 527
408
- mel_bins: int = 64
409
- clip_samples: int = 480000
410
-
411
-
412
- @dataclass
413
- class CLAPTextCfg:
414
- context_length: int
415
- vocab_size: int
416
- width: int
417
- heads: int
418
- layers: int
419
- model_type: str
420
-
421
-
422
- class CLAP(nn.Module):
423
- def __init__(
424
- self,
425
- embed_dim: int,
426
- audio_cfg: CLAPAudioCfp,
427
- text_cfg: CLAPTextCfg,
428
- quick_gelu: bool = False,
429
- enable_fusion: bool = False,
430
- fusion_type: str = 'None',
431
- joint_embed_shape: int = 512,
432
- mlp_act: str = 'relu',
433
- ):
434
- super().__init__()
435
- if isinstance(audio_cfg, dict):
436
- audio_cfg = CLAPAudioCfp(**audio_cfg)
437
- if isinstance(text_cfg, dict):
438
- text_cfg = CLAPTextCfg(**text_cfg)
439
-
440
- self.audio_cfg = audio_cfg
441
- self.text_cfg = text_cfg
442
- self.enable_fusion = enable_fusion
443
- self.fusion_type = fusion_type
444
- self.joint_embed_shape = joint_embed_shape
445
- self.mlp_act = mlp_act
446
-
447
-
448
- self.context_length = text_cfg.context_length
449
-
450
- # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
451
- # memory efficient in recent PyTorch releases (>= 1.10).
452
- # NOTE: timm models always use native GELU regardless of quick_gelu flag.
453
- act_layer = QuickGELU if quick_gelu else nn.GELU
454
-
455
- if mlp_act == 'relu':
456
- mlp_act_layer = nn.ReLU()
457
- elif mlp_act == 'gelu':
458
- mlp_act_layer = nn.GELU()
459
- else:
460
- raise NotImplementedError
461
-
462
- # audio branch
463
- # audio branch parameters
464
- if audio_cfg.model_type == "PANN":
465
- self.audio_branch = create_pann_model(audio_cfg, enable_fusion, fusion_type)
466
- elif audio_cfg.model_type == "HTSAT":
467
- self.audio_branch = create_htsat_model(audio_cfg, enable_fusion, fusion_type)
468
- else:
469
- logging.error(f"Model config for {audio_cfg.model_type} not found")
470
- raise RuntimeError(f"Model config for {audio_cfg.model_type} not found.")
471
-
472
-
473
- # text branch
474
- # text branch parameters
475
- if text_cfg.model_type == "transformer":
476
- self.text_branch = Transformer(
477
- width=text_cfg.width,
478
- layers=text_cfg.layers,
479
- heads=text_cfg.heads,
480
- act_layer=act_layer,
481
- )
482
- self.vocab_size = text_cfg.vocab_size
483
- self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)
484
- self.positional_embedding = nn.Parameter(
485
- torch.empty(self.context_length, text_cfg.width)
486
- )
487
- self.ln_final = LayerNorm(text_cfg.width)
488
- self.text_transform = MLPLayers(units=[self.joint_embed_shape,
489
- self.joint_embed_shape,
490
- self.joint_embed_shape], dropout=0.1)
491
- self.text_projection = nn.Sequential(
492
- nn.Linear(text_cfg.width, self.joint_embed_shape),
493
- mlp_act_layer,
494
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
495
- )
496
- elif text_cfg.model_type == "bert":
497
- self.text_branch = BertModel.from_pretrained("bert-base-uncased")
498
- self.text_transform = MLPLayers(units=[self.joint_embed_shape,
499
- self.joint_embed_shape,
500
- self.joint_embed_shape], dropout=0.1)
501
- self.text_projection = nn.Sequential(
502
- nn.Linear(768, self.joint_embed_shape),
503
- mlp_act_layer,
504
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
505
- )
506
- elif text_cfg.model_type == "roberta":
507
- self.text_branch = RobertaModel.from_pretrained('roberta-base')
508
- self.text_transform = MLPLayers(units=[self.joint_embed_shape,
509
- self.joint_embed_shape,
510
- self.joint_embed_shape], dropout=0.1)
511
- self.text_projection = nn.Sequential(
512
- nn.Linear(768, self.joint_embed_shape),
513
- mlp_act_layer,
514
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
515
- )
516
- elif text_cfg.model_type == "bart":
517
- self.text_branch = BartModel.from_pretrained('facebook/bart-base')
518
- self.text_transform = MLPLayers(units=[self.joint_embed_shape,
519
- self.joint_embed_shape,
520
- self.joint_embed_shape], dropout=0.1)
521
- self.text_projection = nn.Sequential(
522
- nn.Linear(768, self.joint_embed_shape),
523
- mlp_act_layer,
524
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
525
- )
526
- else:
527
- logging.error(f"Model config for {text_cfg.model_type} not found")
528
- raise RuntimeError(f"Model config for {text_cfg.model_type} not found.")
529
- self.text_branch_type = text_cfg.model_type
530
- # text branch parameters
531
-
532
- # audio branch parameters
533
- self.audio_transform = MLPLayers(units=[self.joint_embed_shape,
534
- self.joint_embed_shape,
535
- self.joint_embed_shape], dropout=0.1)
536
-
537
- # below here is text branch parameters
538
-
539
- # ============================================================================================================
540
- self.audio_projection = nn.Sequential(
541
- nn.Linear(embed_dim, self.joint_embed_shape),
542
- mlp_act_layer,
543
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
544
- )
545
-
546
- self.logit_scale_a = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
547
- self.logit_scale_t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
548
- self.register_buffer("attn_mask", self.build_attention_mask(), persistent=False)
549
-
550
- self.init_text_branch_parameters()
551
-
552
- def init_text_branch_parameters(self):
553
- if self.text_branch_type == "transformer":
554
- nn.init.normal_(self.token_embedding.weight, std=0.02)
555
- nn.init.normal_(self.positional_embedding, std=0.01)
556
- proj_std = (self.text_branch.width**-0.5) * (
557
- (2 * self.text_branch.layers) ** -0.5
558
- )
559
- attn_std = self.text_branch.width**-0.5
560
- fc_std = (2 * self.text_branch.width) ** -0.5
561
- for block in self.text_branch.resblocks:
562
- nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
563
- nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
564
- nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
565
- nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
566
- if self.text_branch_type == "bert" or self.text_branch_type == "roberta":
567
- width = self.text_branch.embeddings.word_embeddings.weight.shape[-1]
568
- elif self.text_branch_type == "bart":
569
- width = self.text_branch.shared.weight.shape[-1]
570
- else:
571
- width = self.text_branch.width
572
- nn.init.constant_(self.logit_scale_a, np.log(1 / 0.07))
573
- nn.init.constant_(self.logit_scale_t, np.log(1 / 0.07))
574
-
575
- # deprecated
576
- # if hasattr(self.visual, 'init_parameters'):
577
- # self.visual.init_parameters()
578
-
579
- # if self.text_projection is not None:
580
- # nn.init.normal_(self.text_projection, std=width**-0.5)
581
-
582
- def build_attention_mask(self):
583
- # lazily create causal attention mask, with full attention between the vision tokens
584
- # pytorch uses additive attention mask; fill with -inf
585
- mask = torch.empty(self.context_length, self.context_length)
586
- mask.fill_(float("-inf"))
587
- mask.triu_(1) # zero out the lower diagonal
588
- return mask
589
-
590
- def encode_audio(self, audio, device):
591
- return self.audio_branch(audio, mixup_lambda=None, device=device) # mix lambda needs to add
592
-
593
- # def list_of_dict_of_tensor2dict_of_tensor(self, x, device):
594
- # tmp = {}
595
- # for k in x[0].keys():
596
- # tmp[k] = []
597
- # for i in range(len(x)):
598
- # tmp[k].append(x[i][k][:77])
599
- # for k in x[0].keys():
600
- # tmp[k] = torch.tensor(tmp[k]).to(device=device, non_blocking=True)
601
- # return tmp
602
-
603
- def encode_text(self, text, device):
604
- if self.text_branch_type == "transformer":
605
- text = text.to(device=device, non_blocking=True)
606
- x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
607
-
608
- x = x + self.positional_embedding
609
- x = x.permute(1, 0, 2) # NLD -> LND
610
- x = self.text_branch(x, attn_mask=self.attn_mask)
611
- x = x.permute(1, 0, 2) # LND -> NLD
612
- x = self.ln_final(x)
613
-
614
- # x.shape = [batch_size, n_ctx, transformer.width]
615
- # take features from the eot embedding (eot_token is the highest number in each sequence)
616
- x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])
617
- elif self.text_branch_type == "bert":
618
- # text = self.list_of_dict_of_tensor2dict_of_tensor(text, device)
619
- # text = BatchEncoding(text)
620
- x = self.text_branch(
621
- input_ids=text["input_ids"].to(device=device, non_blocking=True),
622
- attention_mask=text["attention_mask"].to(
623
- device=device, non_blocking=True
624
- ),
625
- token_type_ids=text["token_type_ids"].to(
626
- device=device, non_blocking=True
627
- ),
628
- )["pooler_output"]
629
- x = self.text_projection(x)
630
- elif self.text_branch_type == "roberta":
631
- x = self.text_branch(
632
- input_ids=text["input_ids"].to(device=device, non_blocking=True),
633
- attention_mask=text["attention_mask"].to(
634
- device=device, non_blocking=True
635
- ),
636
- )["pooler_output"]
637
-
638
- x = self.text_projection(x)
639
- elif self.text_branch_type == "bart":
640
- x = torch.mean(self.text_branch(
641
- input_ids=text["input_ids"].to(device=device, non_blocking=True),
642
- attention_mask=text["attention_mask"].to(
643
- device=device, non_blocking=True
644
- ),
645
- )["encoder_last_hidden_state"],axis=1)
646
- x = self.text_projection(x)
647
- else:
648
- logging.error(f"Model type {self.text_branch_type} not found")
649
- raise RuntimeError(f"Model type {self.text_branch_type} not found.")
650
- return x
651
-
652
- def forward(self, audio, text, device=None):
653
- """Forward audio and text into the CLAP
654
-
655
- Parameters
656
- ----------
657
- audio: torch.Tensor (batch_size, audio_length)
658
- the time-domain audio input / the batch of mel_spec and longer list.
659
- text: torch.Tensor () // need to add
660
- the text token input
661
- """
662
- if device is None:
663
- if audio is not None:
664
- device = audio.device
665
- elif text is not None:
666
- device = text.device
667
- if audio is None and text is None:
668
- # a hack to get the logit scale
669
- return self.logit_scale_a.exp(), self.logit_scale_t.exp()
670
- elif audio is None:
671
- return self.encode_text(text, device=device)
672
- elif text is None:
673
- return self.audio_projection(self.encode_audio(audio, device=device)["embedding"])
674
- audio_features = self.audio_projection(self.encode_audio(audio, device=device)["embedding"])
675
- audio_features = F.normalize(audio_features, dim=-1)
676
-
677
- text_features = self.encode_text(
678
- text, device=device
679
- )
680
- # print("text_features", text_features)
681
- # print("text_features.shape", text_features.shape)
682
- # print("text_features.type", type(text_features))
683
- text_features = F.normalize(text_features, dim=-1)
684
-
685
- audio_features_mlp = self.audio_transform(audio_features)
686
- text_features_mlp = self.text_transform(text_features)
687
- # Four outputs: audio features (basic & MLP), text features (basic & MLP)
688
- return (
689
- audio_features,
690
- text_features,
691
- audio_features_mlp,
692
- text_features_mlp,
693
- self.logit_scale_a.exp(),
694
- self.logit_scale_t.exp(),
695
- )
696
-
697
- def get_logit_scale(self):
698
- return self.logit_scale_a.exp(), self.logit_scale_t.exp()
699
-
700
- def get_textual_embedding(self, data):
701
-
702
- device = next(self.parameters()).device
703
- for k in data:
704
- data[k] = data[k].to(device)
705
-
706
- # if self.text_branch_type == "roberta":
707
- text_embeds = self.text_branch(
708
- input_ids=data["input_ids"].to(device=device, non_blocking=True),
709
- attention_mask=data["attention_mask"].to(device=device, non_blocking=True),
710
- )["last_hidden_state"]
711
-
712
- text_embeds = self.text_projection(text_embeds)
713
-
714
- text_embeds = F.normalize(text_embeds, dim=-1)
715
-
716
- return text_embeds
717
-
718
- def get_text_embedding(self, data):
719
- """Get the text embedding from the model
720
-
721
- Parameters
722
- ----------
723
- data: torch.Tensor
724
- a tensor of text embedding
725
-
726
- Returns
727
- ----------
728
- text_embed: torch.Tensor
729
- a tensor of text_embeds (N, D)
730
-
731
- """
732
- device = next(self.parameters()).device
733
- for k in data:
734
- data[k] = data[k].to(device)
735
- text_embeds = self.encode_text(data, device=device)
736
- text_embeds = F.normalize(text_embeds, dim=-1)
737
-
738
- return text_embeds
739
-
740
- def get_audio_embedding(self, data):
741
- """Get the audio embedding from the model
742
-
743
- Parameters
744
- ----------
745
- data: a list of dict
746
- the audio input dict list from 'get_audio_feature' method
747
-
748
- Returns
749
- ----------
750
- audio_embed: torch.Tensor
751
- a tensor of audio_embeds (N, D)
752
-
753
- """
754
- device = next(self.parameters()).device
755
- input_dict = {}
756
- keys = data[0].keys()
757
- for k in keys:
758
- input_dict[k] = torch.cat([d[k].unsqueeze(0) for d in data], dim=0).to(device)
759
-
760
- audio_embeds = self.audio_projection(self.encode_audio(input_dict, device=device)["embedding"])
761
- audio_embeds = F.normalize(audio_embeds, dim=-1)
762
-
763
- return audio_embeds
764
-
765
-
766
-
767
- def audio_infer(self, audio, hopsize=None, device=None):
768
- """Forward one audio and produce the audio embedding
769
-
770
- Parameters
771
- ----------
772
- audio: (audio_length)
773
- the time-domain audio input, notice that it must be only one input
774
- hopsize: int
775
- the overlap hopsize as the sliding window
776
-
777
- Returns
778
- ----------
779
- output_dict: {
780
- key: [n, (embedding_shape)] if "HTS-AT"
781
- or
782
- key: [(embedding_shape)] if "PANN"
783
- }
784
- the list of key values of the audio branch
785
-
786
- """
787
-
788
-         assert not self.training, "the inference mode must be run at eval stage"
-         output_dict = {}
-         # PANN
-         if self.audio_cfg.model_type == "PANN":
-             audio_input = audio.unsqueeze(dim=0)
-             # `key` was left undefined here originally; copy every key returned by the
-             # audio branch, which is what the docstring above describes.
-             audio_output = self.encode_audio(audio_input, device=device)
-             output_dict = {key: value.squeeze(dim=0) for key, value in audio_output.items()}
-         elif self.audio_cfg.model_type == "HTSAT":
-             # repeat the clip until it is at least `clip_samples` long
-             audio_len = len(audio)
-             k = self.audio_cfg.clip_samples // audio_len
-             if k > 1:
-                 audio = audio.repeat(k)
-                 audio_len = len(audio)
-
-             # Clamp the sliding-window hop to the audio length. The original only clamped
-             # when `hopsize` was None (raising a TypeError); one full clip per hop is
-             # assumed here as the default.
-             if hopsize is None:
-                 hopsize = self.audio_cfg.clip_samples
-             hopsize = min(hopsize, audio_len)
-
-             if audio_len > self.audio_cfg.clip_samples:
-                 audio_input = [
-                     audio[pos : pos + self.audio_cfg.clip_samples].clone()
-                     for pos in range(
-                         0, audio_len - self.audio_cfg.clip_samples, hopsize
-                     )
-                 ]
-                 audio_input.append(audio[-self.audio_cfg.clip_samples :].clone())
-                 audio_input = torch.stack(audio_input)
-                 output_dict = dict(self.encode_audio(audio_input, device=device))
-             else:
-                 audio_input = audio.unsqueeze(dim=0)
-                 audio_output = self.encode_audio(audio_input, device=device)
-                 output_dict = {key: value.squeeze(dim=0) for key, value in audio_output.items()}
-
-         return output_dict
820
-
821
-
822
- def convert_weights_to_fp16(model: nn.Module):
823
- """Convert applicable model parameters to fp16"""
824
-
825
- def _convert_weights_to_fp16(l):
826
- if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
827
- l.weight.data = l.weight.data.half()
828
- if l.bias is not None:
829
- l.bias.data = l.bias.data.half()
830
-
831
- if isinstance(l, nn.MultiheadAttention):
832
- for attr in [
833
- *[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]],
834
- "in_proj_bias",
835
- "bias_k",
836
- "bias_v",
837
- ]:
838
- tensor = getattr(l, attr)
839
- if tensor is not None:
840
- tensor.data = tensor.data.half()
841
-
842
- for name in ["text_projection", "proj"]:
843
- if hasattr(l, name):
844
- attr = getattr(l, name)
845
- if attr is not None:
846
- attr.data = attr.data.half()
847
-
848
- model.apply(_convert_weights_to_fp16)
849
-
850
-
851
- # Ignore the state dict of the vision part
852
- def build_model_from_openai_state_dict(state_dict: dict, model_cfg, enable_fusion: bool = False, fusion_type: str = 'None'):
853
-
854
- embed_dim = model_cfg["embed_dim"]
855
- audio_cfg = model_cfg["audio_cfg"]
856
- text_cfg = model_cfg["text_cfg"]
857
- context_length = state_dict["positional_embedding"].shape[0]
858
- vocab_size = state_dict["token_embedding.weight"].shape[0]
859
- transformer_width = state_dict["ln_final.weight"].shape[0]
860
- transformer_heads = transformer_width // 64
861
- transformer_layers = len(
862
- set(
863
- k.split(".")[2]
864
- for k in state_dict
865
- if k.startswith(f"transformer.resblocks")
866
- )
867
- )
868
-
869
- audio_cfg = CLAPAudioCfp(**audio_cfg)
870
- text_cfg = CLAPTextCfg(**text_cfg)
871
-
872
- model = CLAP(
873
- embed_dim,
874
- audio_cfg=audio_cfg,
875
- text_cfg=text_cfg,
876
- quick_gelu=True, # OpenAI models were trained with QuickGELU
877
- enable_fusion=enable_fusion,
878
- fusion_type=fusion_type
879
- )
880
- state_dict["logit_scale_a"] = state_dict["logit_scale"]
881
- state_dict["logit_scale_t"] = state_dict["logit_scale"]
882
- pop_keys = list(state_dict.keys())[::]
883
- # pop the visual branch saved weights
884
- for key in pop_keys:
885
- if key.startswith("visual."):
886
- state_dict.pop(key, None)
887
-
888
- for key in ["logit_scale", "input_resolution", "context_length", "vocab_size"]:
889
- state_dict.pop(key, None)
890
-
891
- # not use fp16
892
- # convert_weights_to_fp16(model)
893
- model.load_state_dict(state_dict, strict=False)
894
- return model.eval()
895
-
896
-
897
- def trace_model(model, batch_size=256, device=torch.device("cpu")):
898
- model.eval()
899
- audio_length = model.audio_cfg.audio_length
900
- example_audio = torch.ones((batch_size, audio_length), device=device)
901
- example_text = torch.zeros(
902
- (batch_size, model.context_length), dtype=torch.int, device=device
903
- )
904
- model = torch.jit.trace_module(
905
- model,
906
- inputs=dict(
907
- forward=(example_audio, example_text),
908
- encode_text=(example_text,),
909
- encode_image=(example_audio,),
910
- ),
911
- )
912
- model.audio_cfg.audio_length = audio_length # Question: what does this do?
913
- return model
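
The `CLAP.forward` method above returns normalized audio and text features (plus their MLP-transformed variants) together with two exponentiated logit scales. A minimal sketch of how those outputs can be combined into an audio-to-text similarity matrix; the helper below is illustrative and not part of the original file:

```python
import torch
import torch.nn.functional as F

def clap_similarity(audio_features: torch.Tensor,
                    text_features: torch.Tensor,
                    logit_scale_a: torch.Tensor) -> torch.Tensor:
    """Scaled cosine-similarity logits between batches of CLAP audio and text embeddings.

    `audio_features` and `text_features` are the first two outputs of CLAP.forward
    (already normalized there); `logit_scale_a` is the exponentiated audio logit
    scale returned alongside them.
    """
    # re-normalize defensively, then take the (N_audio, N_text) dot-product matrix
    audio_features = F.normalize(audio_features, dim=-1)
    text_features = F.normalize(text_features, dim=-1)
    return logit_scale_a * audio_features @ text_features.t()
```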
spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/runs/preprocess.py DELETED
@@ -1,17 +0,0 @@
1
- import utils.commons.single_thread_env # NOQA
2
- from text_to_speech.utils.commons.hparams import hparams, set_hparams
3
- import importlib
4
-
5
-
6
- def preprocess():
7
- assert hparams['preprocess_cls'] != ''
8
-
9
- pkg = ".".join(hparams["preprocess_cls"].split(".")[:-1])
10
- cls_name = hparams["preprocess_cls"].split(".")[-1]
11
- process_cls = getattr(importlib.import_module(pkg), cls_name)
12
- process_cls().process()
13
-
14
-
15
- if __name__ == '__main__':
16
- set_hparams()
17
- preprocess()
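
`preprocess()` resolves the dotted class path stored in `hparams['preprocess_cls']` with `importlib` and calls its `process()` method. A small sketch of that lookup pattern in isolation; the dotted path in the comment is hypothetical and would normally come from the experiment's hparams config:

```python
import importlib

def resolve_class(dotted_path: str):
    """Resolve 'package.module.ClassName' to the class object, mirroring how
    preprocess() looks up hparams['preprocess_cls']."""
    module_path, _, class_name = dotted_path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

# Hypothetical example path:
# PreprocessorCls = resolve_class("text_to_speech.data_gen.tts.base_preprocess.BasePreprocessor")
# PreprocessorCls().process()
```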
spaces/AIhackrOrg/README/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: README
3
- emoji: 🔥
4
- colorFrom: yellow
5
- colorTo: indigo
6
- sdk: static
7
- pinned: false
8
- ---
9
-
10
- Edit this `README.md` markdown file to author your organization card 🔥
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/streaming.py DELETED
@@ -1,135 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """
8
- Streaming module API that should be implemented by all Streaming components,
9
- """
10
-
11
- from contextlib import contextmanager
12
- import typing as tp
13
- from torch import nn
14
- import torch
15
-
16
-
17
- State = tp.Dict[str, torch.Tensor]
18
-
19
-
20
- class StreamingModule(nn.Module):
21
- """Common API for streaming components.
22
-
23
- Each streaming component has a streaming state, which is just a dict[str, Tensor].
24
- By convention, the first dim of each tensor must be the batch size.
25
- Don't use dots in the key names, as this would clash with submodules
26
- (like in state_dict).
27
-
28
- If `self._is_streaming` is True, the component should use and remember
29
- the proper state inside `self._streaming_state`.
30
-
31
- To set a streaming component in streaming state, use
32
-
33
- with module.streaming():
34
- ...
35
-
36
- This will automatically reset the streaming state when exiting the context manager.
37
- This also automatically propagates to all streaming children module.
38
-
39
- Some module might also implement the `StreamingModule.flush` method, although
40
- this one is trickier, as all parents module must be StreamingModule and implement
41
- it as well for it to work properly. See `StreamingSequential` after.
42
- """
43
- def __init__(self) -> None:
44
- super().__init__()
45
- self._streaming_state: State = {}
46
- self._is_streaming = False
47
-
48
- def _apply_named_streaming(self, fn: tp.Any):
49
- for name, module in self.named_modules():
50
- if isinstance(module, StreamingModule):
51
- fn(name, module)
52
-
53
- def _set_streaming(self, streaming: bool):
54
- def _set_streaming(name, module):
55
- module._is_streaming = streaming
56
- self._apply_named_streaming(_set_streaming)
57
-
58
- @contextmanager
59
- def streaming(self):
60
- """Context manager to enter streaming mode. Reset streaming state on exit.
61
- """
62
- self._set_streaming(True)
63
- try:
64
- yield
65
- finally:
66
- self._set_streaming(False)
67
- self.reset_streaming()
68
-
69
- def reset_streaming(self):
70
- """Reset the streaming state.
71
- """
72
- def _reset(name: str, module: StreamingModule):
73
- module._streaming_state.clear()
74
-
75
- self._apply_named_streaming(_reset)
76
-
77
- def get_streaming_state(self) -> State:
78
- """Return the streaming state, including that of sub-modules.
79
- """
80
- state: State = {}
81
-
82
- def _add(name: str, module: StreamingModule):
83
- if name:
84
- name += "."
85
- for key, value in module._streaming_state.items():
86
- state[name + key] = value
87
-
88
- self._apply_named_streaming(_add)
89
- return state
90
-
91
- def set_streaming_state(self, state: State):
92
- """Set the streaming state, including that of sub-modules.
93
- """
94
- state = dict(state)
95
-
96
- def _set(name: str, module: StreamingModule):
97
- if name:
98
- name += "."
99
- module._streaming_state.clear()
100
- for key, value in list(state.items()):
101
- # complexity is not ideal here, but probably fine.
102
- if key.startswith(name):
103
- local_key = key[len(name):]
104
- if '.' not in local_key:
105
- module._streaming_state[local_key] = value
106
- del state[key]
107
-
108
- self._apply_named_streaming(_set)
109
- assert len(state) == 0, list(state.keys())
110
-
111
- def flush(self, x: tp.Optional[torch.Tensor] = None):
112
- """Flush any remaining outputs that were waiting for completion.
113
- Typically, for convolutions, this will add the final padding
114
- and process the last buffer.
115
-
116
- This should take an optional argument `x`, which will be provided
117
- if a module before this one in the streaming pipeline has already
118
- spitted out a flushed out buffer.
119
- """
120
- if x is None:
121
- return None
122
- else:
123
- return self(x)
124
-
125
-
126
- class StreamingSequential(StreamingModule, nn.Sequential):
127
- """A streaming compatible alternative of `nn.Sequential`.
128
- """
129
- def flush(self, x: tp.Optional[torch.Tensor] = None):
130
- for module in self:
131
- if isinstance(module, StreamingModule):
132
- x = module.flush(x)
133
- elif x is not None:
134
- x = module(x)
135
- return x
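
The `StreamingModule` docstring above describes a per-module state dict whose first dimension is the batch size, plus a `streaming()` context manager that resets that state on exit. A minimal sketch of a subclass using this API; the `RunningContext` class and its cache key are illustrative and not part of the original module:

```python
import torch

# assumes StreamingModule from the module above is importable in the current scope
class RunningContext(StreamingModule):
    """Keeps the tail of the previous chunk while inside `with module.streaming():`."""

    def __init__(self, context: int = 4):
        super().__init__()
        self.context = context

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self._is_streaming:
            previous = self._streaming_state.get("previous")
            if previous is not None:
                # prepend the cached context from the previous call
                x = torch.cat([previous, x], dim=-1)
            # remember the tail for the next chunk; first dim stays the batch size
            self._streaming_state["previous"] = x[..., -self.context:]
        return x

module = RunningContext()
with module.streaming():
    for chunk in torch.ones(1, 12).split(4, dim=-1):
        out = module(chunk)  # later chunks see the cached context
# leaving the context manager resets the streaming state
```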
spaces/AbdoulGafar/woodsound/app.py DELETED
@@ -1,73 +0,0 @@
- import gradio as gr
- from fastai.vision.all import *
- from PIL import Image
- import tempfile
- import os
- 
- import rarfile
- import librosa
- import librosa.display
- import matplotlib.pyplot as plt
- 
- # Load the Fastai model
- learn = load_learner('woodsound')
- 
- # Run a prediction from an audio file
- def predict_audio(audio_file):
- 
-     # Load the audio file
-     samples, sample_rate = librosa.load(audio_file)
- 
-     # Create a temporary directory to hold the intermediate image
-     with tempfile.TemporaryDirectory() as temp_dir:
- 
-         # Build the file name for the spectrogram
-         spectrogram_filename = os.path.splitext(os.path.basename(audio_file))[0] + ".png"
-         temp_image_path = os.path.join(temp_dir, spectrogram_filename)
- 
-         # Create and save the mel spectrogram
-         plt.figure(figsize=(0.72, 0.72))
-         ax = plt.subplot(111)
-         ax.axes.get_xaxis().set_visible(False)
-         ax.axes.get_yaxis().set_visible(False)
-         ax.set_frame_on(False)
-         S = librosa.feature.melspectrogram(y=samples, sr=sample_rate)
-         librosa.display.specshow(librosa.power_to_db(S, ref=np.max))
-         plt.savefig(temp_image_path, dpi=400, bbox_inches='tight', pad_inches=0)
-         plt.close()
- 
-         # Run the prediction with the model on the temporary image
-         probabilities = predict_with_probabilities(temp_image_path, learn)
- 
-     return probabilities
- 
- 
- # Run the prediction with the Fastai model
- def predict_with_probabilities(image_path, learn):
- 
-     # Load the image
-     img = Image.open(image_path).convert('RGB')
- 
-     # Resize the image to 224x224
-     img = img.resize((224, 224))
- 
-     # Convert the image into a format Fastai can use (PILImage)
-     img = PILImage(img)
- 
-     # Predict on the resized image
-     pred, idx, prob = learn.predict(img)
- 
-     # Category names, kept exactly as trained so they match the learner's vocabulary
-     categories = ('ache', 'scie_electrique', 'oiseaux', 'arme_a_feu', 'scie_manuelle')
- 
-     return dict(zip(categories, map(float, prob)))
- 
- # Build the Gradio interface
- examples_files = [
-     ["song_1.wav"],
-     ["song_2.mp3"],
- ]
- audio_input = gr.Audio(source='upload', type='filepath', label="Select an audio file (wav, mp3, etc.)")
- output_component = gr.Label(num_top_classes=5)
- gr.Interface(fn=predict_audio, inputs=audio_input, outputs=output_component, examples=examples_files).launch(share=True)
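
Since `predict_audio` only needs a file path, it can also be exercised without the Gradio interface; a small sketch using one of the bundled example files:

```python
if __name__ == "__main__":
    # Quick local check without the Gradio UI; "song_1.wav" is one of the bundled examples.
    probs = predict_audio("song_1.wav")
    for label, p in sorted(probs.items(), key=lambda kv: kv[1], reverse=True):
        print(f"{label}: {p:.3f}")
```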
spaces/Adapter/CoAdapter/ldm/models/diffusion/dpm_solver/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .sampler import DPMSolverSampler
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fullwindowrectangle/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import FullWindowRectangle from './FullWindowRectangle.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('fullWindowRectangle', function (fillColor, fillAlpha) {
6
- var gameObject = new FullWindowRectangle(this.scene, fillColor, fillAlpha);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.FullWindowRectangle', FullWindowRectangle);
12
-
13
- export default FullWindowRectangle;
spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/README.md DELETED
@@ -1,25 +0,0 @@
1
- [![Build Status](https://travis-ci.org/william-silversmith/countless.svg?branch=master)](https://travis-ci.org/william-silversmith/countless)
2
-
3
- Python COUNTLESS Downsampling
4
- =============================
5
-
6
- To install:
7
-
8
- `pip install -r requirements.txt`
9
-
10
- To test:
11
-
12
- `python test.py`
13
-
14
- To benchmark countless2d:
15
-
16
- `python python/countless2d.py python/images/gray_segmentation.png`
17
-
18
- To benchmark countless3d:
19
-
20
- `python python/countless3d.py`
21
-
22
- Adjust N and the list of algorithms inside each script to modify the run parameters.
23
-
24
-
25
- Python3 is slightly faster than Python2.
spaces/Androidonnxfork/CivitAi-to-Diffusers/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: CivitAi To Diffusers
3
- emoji: ©️➡️🧨
4
- colorFrom: orange
5
- colorTo: cyan
6
- sdk: gradio
7
- sdk_version: 3.31.0
8
- app_file: app1.py
9
- pinned: true
10
- duplicated_from: Androidonnxfork/sd-to-diffuserscustom
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/Makefile DELETED
@@ -1,96 +0,0 @@
1
- .PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples
2
-
3
- # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
4
- export PYTHONPATH = src
5
-
6
- check_dirs := examples scripts src tests utils
7
-
8
- modified_only_fixup:
9
- $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
10
- @if test -n "$(modified_py_files)"; then \
11
- echo "Checking/fixing $(modified_py_files)"; \
12
- black $(modified_py_files); \
13
- ruff $(modified_py_files); \
14
- else \
15
- echo "No library .py files were modified"; \
16
- fi
17
-
18
- # Update src/diffusers/dependency_versions_table.py
19
-
20
- deps_table_update:
21
- @python setup.py deps_table_update
22
-
23
- deps_table_check_updated:
24
- @md5sum src/diffusers/dependency_versions_table.py > md5sum.saved
25
- @python setup.py deps_table_update
26
- @md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1)
27
- @rm md5sum.saved
28
-
29
- # autogenerating code
30
-
31
- autogenerate_code: deps_table_update
32
-
33
- # Check that the repo is in a good state
34
-
35
- repo-consistency:
36
- python utils/check_dummies.py
37
- python utils/check_repo.py
38
- python utils/check_inits.py
39
-
40
- # this target runs checks on all files
41
-
42
- quality:
43
- black --check $(check_dirs)
44
- ruff $(check_dirs)
45
- doc-builder style src/diffusers docs/source --max_len 119 --check_only --path_to_docs docs/source
46
- python utils/check_doc_toc.py
47
-
48
- # Format source code automatically and check is there are any problems left that need manual fixing
49
-
50
- extra_style_checks:
51
- python utils/custom_init_isort.py
52
- doc-builder style src/diffusers docs/source --max_len 119 --path_to_docs docs/source
53
- python utils/check_doc_toc.py --fix_and_overwrite
54
-
55
- # this target runs checks on all files and potentially modifies some of them
56
-
57
- style:
58
- black $(check_dirs)
59
- ruff $(check_dirs) --fix
60
- ${MAKE} autogenerate_code
61
- ${MAKE} extra_style_checks
62
-
63
- # Super fast fix and check target that only works on relevant modified files since the branch was made
64
-
65
- fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency
66
-
67
- # Make marked copies of snippets of codes conform to the original
68
-
69
- fix-copies:
70
- python utils/check_copies.py --fix_and_overwrite
71
- python utils/check_dummies.py --fix_and_overwrite
72
-
73
- # Run tests for the library
74
-
75
- test:
76
- python -m pytest -n auto --dist=loadfile -s -v ./tests/
77
-
78
- # Run tests for examples
79
-
80
- test-examples:
81
- python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/
82
-
83
-
84
- # Release stuff
85
-
86
- pre-release:
87
- python utils/release.py
88
-
89
- pre-patch:
90
- python utils/release.py --patch
91
-
92
- post-release:
93
- python utils/release.py --post_release
94
-
95
- post-patch:
96
- python utils/release.py --post_release --patch
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/audioldm.md DELETED
@@ -1,51 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # AudioLDM
14
-
15
- AudioLDM was proposed in [AudioLDM: Text-to-Audio Generation with Latent Diffusion Models](https://huggingface.co/papers/2301.12503) by Haohe Liu et al. Inspired by [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview), AudioLDM
16
- is a text-to-audio _latent diffusion model (LDM)_ that learns continuous audio representations from [CLAP](https://huggingface.co/docs/transformers/main/model_doc/clap)
17
- latents. AudioLDM takes a text prompt as input and predicts the corresponding audio. It can generate text-conditional
18
- sound effects, human speech and music.
19
-
20
- The abstract from the paper is:
21
-
22
- *Text-to-audio (TTA) system has recently gained attention for its ability to synthesize general audio based on text descriptions. However, previous studies in TTA have limited generation quality with high computational costs. In this study, we propose AudioLDM, a TTA system that is built on a latent space to learn the continuous audio representations from contrastive language-audio pretraining (CLAP) latents. The pretrained CLAP models enable us to train LDMs with audio embedding while providing text embedding as a condition during sampling. By learning the latent representations of audio signals and their compositions without modeling the cross-modal relationship, AudioLDM is advantageous in both generation quality and computational efficiency. Trained on AudioCaps with a single GPU, AudioLDM achieves state-of-the-art TTA performance measured by both objective and subjective metrics (e.g., frechet distance). Moreover, AudioLDM is the first TTA system that enables various text-guided audio manipulations (e.g., style transfer) in a zero-shot fashion. Our implementation and demos are available at https://audioldm.github.io.*
23
-
24
- The original codebase can be found at [haoheliu/AudioLDM](https://github.com/haoheliu/AudioLDM).
25
-
26
- ## Tips
27
-
28
- When constructing a prompt, keep in mind:
29
-
30
- * Descriptive prompt inputs work best; you can use adjectives to describe the sound (for example, "high quality" or "clear") and make the prompt context specific (for example, "water stream in a forest" instead of "stream").
31
- * It's best to use general terms like "cat" or "dog" instead of specific names or abstract objects the model may not be familiar with.
32
-
33
- During inference:
34
-
35
- * The _quality_ of the predicted audio sample can be controlled by the `num_inference_steps` argument; higher steps give higher quality audio at the expense of slower inference.
36
- * The _length_ of the predicted audio sample can be controlled by varying the `audio_length_in_s` argument.
37
-
38
- <Tip>
39
-
40
- Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
41
-
42
- </Tip>
43
-
44
- ## AudioLDMPipeline
45
- [[autodoc]] AudioLDMPipeline
46
- - all
47
- - __call__
48
-
49
- ## StableDiffusionPipelineOutput
50
-
51
- [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
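
A short usage sketch tying the tips above together; the checkpoint id is the one commonly used with this pipeline and may need to be substituted for your own:

```python
import torch
import scipy.io.wavfile
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm-s-full-v2", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "Techno music with a strong, upbeat tempo and high melodic riffs"
# more steps -> higher quality but slower; audio_length_in_s controls the clip length
audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0]

# AudioLDM generates 16 kHz audio
scipy.io.wavfile.write("techno.wav", rate=16000, data=audio)
```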
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/dpm_discrete.md DELETED
@@ -1,22 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # DPM Discrete Scheduler inspired by the Karras et al. paper
- 
- ## Overview
- 
- Inspired by [Karras et al.](https://arxiv.org/abs/2206.00364). The scheduler is ported from [@crowsonkb](https://github.com/crowsonkb)'s [k-diffusion](https://github.com/crowsonkb/k-diffusion) library.
- 
- All credit for making this scheduler work goes to [Katherine Crowson](https://github.com/crowsonkb/).
20
-
21
- ## KDPM2DiscreteScheduler
22
- [[autodoc]] KDPM2DiscreteScheduler
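
A brief sketch of swapping this scheduler into an existing pipeline via `from_config`; the model id is only an example:

```python
from diffusers import DiffusionPipeline, KDPM2DiscreteScheduler

# any checkpoint with a compatible scheduler config works the same way
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]
```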
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ddpm_original_checkpoint_to_diffusers.py DELETED
@@ -1,431 +0,0 @@
1
- import argparse
2
- import json
3
-
4
- import torch
5
-
6
- from diffusers import AutoencoderKL, DDPMPipeline, DDPMScheduler, UNet2DModel, VQModel
7
-
8
-
9
- def shave_segments(path, n_shave_prefix_segments=1):
10
- """
11
- Removes segments. Positive values shave the first segments, negative shave the last segments.
12
- """
13
- if n_shave_prefix_segments >= 0:
14
- return ".".join(path.split(".")[n_shave_prefix_segments:])
15
- else:
16
- return ".".join(path.split(".")[:n_shave_prefix_segments])
17
-
18
-
19
- def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
20
- mapping = []
21
- for old_item in old_list:
22
- new_item = old_item
23
- new_item = new_item.replace("block.", "resnets.")
24
- new_item = new_item.replace("conv_shorcut", "conv1")
25
- new_item = new_item.replace("in_shortcut", "conv_shortcut")
26
- new_item = new_item.replace("temb_proj", "time_emb_proj")
27
-
28
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
29
-
30
- mapping.append({"old": old_item, "new": new_item})
31
-
32
- return mapping
33
-
34
-
35
- def renew_attention_paths(old_list, n_shave_prefix_segments=0, in_mid=False):
36
- mapping = []
37
- for old_item in old_list:
38
- new_item = old_item
39
-
40
- # In `model.mid`, the layer is called `attn`.
41
- if not in_mid:
42
- new_item = new_item.replace("attn", "attentions")
43
- new_item = new_item.replace(".k.", ".key.")
44
- new_item = new_item.replace(".v.", ".value.")
45
- new_item = new_item.replace(".q.", ".query.")
46
-
47
- new_item = new_item.replace("proj_out", "proj_attn")
48
- new_item = new_item.replace("norm", "group_norm")
49
-
50
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
51
- mapping.append({"old": old_item, "new": new_item})
52
-
53
- return mapping
54
-
55
-
56
- def assign_to_checkpoint(
57
- paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
58
- ):
59
- assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
60
-
61
- if attention_paths_to_split is not None:
62
- if config is None:
63
- raise ValueError("Please specify the config if setting 'attention_paths_to_split' to 'True'.")
64
-
65
- for path, path_map in attention_paths_to_split.items():
66
- old_tensor = old_checkpoint[path]
67
- channels = old_tensor.shape[0] // 3
68
-
69
- target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
70
-
71
- num_heads = old_tensor.shape[0] // config.get("num_head_channels", 1) // 3
72
-
73
- old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
74
- query, key, value = old_tensor.split(channels // num_heads, dim=1)
75
-
76
- checkpoint[path_map["query"]] = query.reshape(target_shape).squeeze()
77
- checkpoint[path_map["key"]] = key.reshape(target_shape).squeeze()
78
- checkpoint[path_map["value"]] = value.reshape(target_shape).squeeze()
79
-
80
- for path in paths:
81
- new_path = path["new"]
82
-
83
- if attention_paths_to_split is not None and new_path in attention_paths_to_split:
84
- continue
85
-
86
- new_path = new_path.replace("down.", "down_blocks.")
87
- new_path = new_path.replace("up.", "up_blocks.")
88
-
89
- if additional_replacements is not None:
90
- for replacement in additional_replacements:
91
- new_path = new_path.replace(replacement["old"], replacement["new"])
92
-
93
- if "attentions" in new_path:
94
- checkpoint[new_path] = old_checkpoint[path["old"]].squeeze()
95
- else:
96
- checkpoint[new_path] = old_checkpoint[path["old"]]
97
-
98
-
99
- def convert_ddpm_checkpoint(checkpoint, config):
100
- """
101
- Takes a state dict and a config, and returns a converted checkpoint.
102
- """
103
- new_checkpoint = {}
104
-
105
- new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["temb.dense.0.weight"]
106
- new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["temb.dense.0.bias"]
107
- new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["temb.dense.1.weight"]
108
- new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["temb.dense.1.bias"]
109
-
110
- new_checkpoint["conv_norm_out.weight"] = checkpoint["norm_out.weight"]
111
- new_checkpoint["conv_norm_out.bias"] = checkpoint["norm_out.bias"]
112
-
113
- new_checkpoint["conv_in.weight"] = checkpoint["conv_in.weight"]
114
- new_checkpoint["conv_in.bias"] = checkpoint["conv_in.bias"]
115
- new_checkpoint["conv_out.weight"] = checkpoint["conv_out.weight"]
116
- new_checkpoint["conv_out.bias"] = checkpoint["conv_out.bias"]
117
-
118
- num_down_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "down" in layer})
119
- down_blocks = {
120
- layer_id: [key for key in checkpoint if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
121
- }
122
-
123
- num_up_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "up" in layer})
124
- up_blocks = {layer_id: [key for key in checkpoint if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)}
125
-
126
- for i in range(num_down_blocks):
127
- block_id = (i - 1) // (config["layers_per_block"] + 1)
128
-
129
- if any("downsample" in layer for layer in down_blocks[i]):
130
- new_checkpoint[f"down_blocks.{i}.downsamplers.0.conv.weight"] = checkpoint[
131
- f"down.{i}.downsample.op.weight"
132
- ]
133
- new_checkpoint[f"down_blocks.{i}.downsamplers.0.conv.bias"] = checkpoint[f"down.{i}.downsample.op.bias"]
134
- # new_checkpoint[f'down_blocks.{i}.downsamplers.0.op.weight'] = checkpoint[f'down.{i}.downsample.conv.weight']
135
- # new_checkpoint[f'down_blocks.{i}.downsamplers.0.op.bias'] = checkpoint[f'down.{i}.downsample.conv.bias']
136
-
137
- if any("block" in layer for layer in down_blocks[i]):
138
- num_blocks = len(
139
- {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in down_blocks[i] if "block" in layer}
140
- )
141
- blocks = {
142
- layer_id: [key for key in down_blocks[i] if f"block.{layer_id}" in key]
143
- for layer_id in range(num_blocks)
144
- }
145
-
146
- if num_blocks > 0:
147
- for j in range(config["layers_per_block"]):
148
- paths = renew_resnet_paths(blocks[j])
149
- assign_to_checkpoint(paths, new_checkpoint, checkpoint)
150
-
151
- if any("attn" in layer for layer in down_blocks[i]):
152
- num_attn = len(
153
- {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in down_blocks[i] if "attn" in layer}
154
- )
155
- attns = {
156
- layer_id: [key for key in down_blocks[i] if f"attn.{layer_id}" in key]
157
- for layer_id in range(num_blocks)
158
- }
159
-
160
- if num_attn > 0:
161
- for j in range(config["layers_per_block"]):
162
- paths = renew_attention_paths(attns[j])
163
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, config=config)
164
-
165
- mid_block_1_layers = [key for key in checkpoint if "mid.block_1" in key]
166
- mid_block_2_layers = [key for key in checkpoint if "mid.block_2" in key]
167
- mid_attn_1_layers = [key for key in checkpoint if "mid.attn_1" in key]
168
-
169
- # Mid new 2
170
- paths = renew_resnet_paths(mid_block_1_layers)
171
- assign_to_checkpoint(
172
- paths,
173
- new_checkpoint,
174
- checkpoint,
175
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_1", "new": "resnets.0"}],
176
- )
177
-
178
- paths = renew_resnet_paths(mid_block_2_layers)
179
- assign_to_checkpoint(
180
- paths,
181
- new_checkpoint,
182
- checkpoint,
183
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_2", "new": "resnets.1"}],
184
- )
185
-
186
- paths = renew_attention_paths(mid_attn_1_layers, in_mid=True)
187
- assign_to_checkpoint(
188
- paths,
189
- new_checkpoint,
190
- checkpoint,
191
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "attn_1", "new": "attentions.0"}],
192
- )
193
-
194
- for i in range(num_up_blocks):
195
- block_id = num_up_blocks - 1 - i
196
-
197
- if any("upsample" in layer for layer in up_blocks[i]):
198
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
199
- f"up.{i}.upsample.conv.weight"
200
- ]
201
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[f"up.{i}.upsample.conv.bias"]
202
-
203
- if any("block" in layer for layer in up_blocks[i]):
204
- num_blocks = len(
205
- {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in up_blocks[i] if "block" in layer}
206
- )
207
- blocks = {
208
- layer_id: [key for key in up_blocks[i] if f"block.{layer_id}" in key] for layer_id in range(num_blocks)
209
- }
210
-
211
- if num_blocks > 0:
212
- for j in range(config["layers_per_block"] + 1):
213
- replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"}
214
- paths = renew_resnet_paths(blocks[j])
215
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices])
216
-
217
- if any("attn" in layer for layer in up_blocks[i]):
218
- num_attn = len(
219
- {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in up_blocks[i] if "attn" in layer}
220
- )
221
- attns = {
222
- layer_id: [key for key in up_blocks[i] if f"attn.{layer_id}" in key] for layer_id in range(num_blocks)
223
- }
224
-
225
- if num_attn > 0:
226
- for j in range(config["layers_per_block"] + 1):
227
- replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"}
228
- paths = renew_attention_paths(attns[j])
229
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices])
230
-
231
- new_checkpoint = {k.replace("mid_new_2", "mid_block"): v for k, v in new_checkpoint.items()}
232
- return new_checkpoint
233
-
234
-
235
- def convert_vq_autoenc_checkpoint(checkpoint, config):
236
- """
237
- Takes a state dict and a config, and returns a converted checkpoint.
238
- """
239
- new_checkpoint = {}
240
-
241
- new_checkpoint["encoder.conv_norm_out.weight"] = checkpoint["encoder.norm_out.weight"]
242
- new_checkpoint["encoder.conv_norm_out.bias"] = checkpoint["encoder.norm_out.bias"]
243
-
244
- new_checkpoint["encoder.conv_in.weight"] = checkpoint["encoder.conv_in.weight"]
245
- new_checkpoint["encoder.conv_in.bias"] = checkpoint["encoder.conv_in.bias"]
246
- new_checkpoint["encoder.conv_out.weight"] = checkpoint["encoder.conv_out.weight"]
247
- new_checkpoint["encoder.conv_out.bias"] = checkpoint["encoder.conv_out.bias"]
248
-
249
- new_checkpoint["decoder.conv_norm_out.weight"] = checkpoint["decoder.norm_out.weight"]
250
- new_checkpoint["decoder.conv_norm_out.bias"] = checkpoint["decoder.norm_out.bias"]
251
-
252
- new_checkpoint["decoder.conv_in.weight"] = checkpoint["decoder.conv_in.weight"]
253
- new_checkpoint["decoder.conv_in.bias"] = checkpoint["decoder.conv_in.bias"]
254
- new_checkpoint["decoder.conv_out.weight"] = checkpoint["decoder.conv_out.weight"]
255
- new_checkpoint["decoder.conv_out.bias"] = checkpoint["decoder.conv_out.bias"]
256
-
257
- num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in checkpoint if "down" in layer})
258
- down_blocks = {
259
- layer_id: [key for key in checkpoint if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
260
- }
261
-
262
- num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in checkpoint if "up" in layer})
263
- up_blocks = {layer_id: [key for key in checkpoint if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)}
264
-
265
- for i in range(num_down_blocks):
266
- block_id = (i - 1) // (config["layers_per_block"] + 1)
267
-
268
- if any("downsample" in layer for layer in down_blocks[i]):
269
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = checkpoint[
270
- f"encoder.down.{i}.downsample.conv.weight"
271
- ]
272
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = checkpoint[
273
- f"encoder.down.{i}.downsample.conv.bias"
274
- ]
275
-
276
- if any("block" in layer for layer in down_blocks[i]):
277
- num_blocks = len(
278
- {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in down_blocks[i] if "block" in layer}
279
- )
280
- blocks = {
281
- layer_id: [key for key in down_blocks[i] if f"block.{layer_id}" in key]
282
- for layer_id in range(num_blocks)
283
- }
284
-
285
- if num_blocks > 0:
286
- for j in range(config["layers_per_block"]):
287
- paths = renew_resnet_paths(blocks[j])
288
- assign_to_checkpoint(paths, new_checkpoint, checkpoint)
289
-
290
- if any("attn" in layer for layer in down_blocks[i]):
291
- num_attn = len(
292
- {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in down_blocks[i] if "attn" in layer}
293
- )
294
- attns = {
295
- layer_id: [key for key in down_blocks[i] if f"attn.{layer_id}" in key]
296
- for layer_id in range(num_blocks)
297
- }
298
-
299
- if num_attn > 0:
300
- for j in range(config["layers_per_block"]):
301
- paths = renew_attention_paths(attns[j])
302
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, config=config)
303
-
304
- mid_block_1_layers = [key for key in checkpoint if "mid.block_1" in key]
305
- mid_block_2_layers = [key for key in checkpoint if "mid.block_2" in key]
306
- mid_attn_1_layers = [key for key in checkpoint if "mid.attn_1" in key]
307
-
308
- # Mid new 2
309
- paths = renew_resnet_paths(mid_block_1_layers)
310
- assign_to_checkpoint(
311
- paths,
312
- new_checkpoint,
313
- checkpoint,
314
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_1", "new": "resnets.0"}],
315
- )
316
-
317
- paths = renew_resnet_paths(mid_block_2_layers)
318
- assign_to_checkpoint(
319
- paths,
320
- new_checkpoint,
321
- checkpoint,
322
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_2", "new": "resnets.1"}],
323
- )
324
-
325
- paths = renew_attention_paths(mid_attn_1_layers, in_mid=True)
326
- assign_to_checkpoint(
327
- paths,
328
- new_checkpoint,
329
- checkpoint,
330
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "attn_1", "new": "attentions.0"}],
331
- )
332
-
333
- for i in range(num_up_blocks):
334
- block_id = num_up_blocks - 1 - i
335
-
336
- if any("upsample" in layer for layer in up_blocks[i]):
337
- new_checkpoint[f"decoder.up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
338
- f"decoder.up.{i}.upsample.conv.weight"
339
- ]
340
- new_checkpoint[f"decoder.up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
341
- f"decoder.up.{i}.upsample.conv.bias"
342
- ]
343
-
344
- if any("block" in layer for layer in up_blocks[i]):
345
- num_blocks = len(
346
- {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in up_blocks[i] if "block" in layer}
347
- )
348
- blocks = {
349
- layer_id: [key for key in up_blocks[i] if f"block.{layer_id}" in key] for layer_id in range(num_blocks)
350
- }
351
-
352
- if num_blocks > 0:
353
- for j in range(config["layers_per_block"] + 1):
354
- replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"}
355
- paths = renew_resnet_paths(blocks[j])
356
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices])
357
-
358
- if any("attn" in layer for layer in up_blocks[i]):
359
- num_attn = len(
360
- {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in up_blocks[i] if "attn" in layer}
361
- )
362
- attns = {
363
- layer_id: [key for key in up_blocks[i] if f"attn.{layer_id}" in key] for layer_id in range(num_blocks)
364
- }
365
-
366
- if num_attn > 0:
367
- for j in range(config["layers_per_block"] + 1):
368
- replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"}
369
- paths = renew_attention_paths(attns[j])
370
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices])
371
-
372
- new_checkpoint = {k.replace("mid_new_2", "mid_block"): v for k, v in new_checkpoint.items()}
373
- new_checkpoint["quant_conv.weight"] = checkpoint["quant_conv.weight"]
374
- new_checkpoint["quant_conv.bias"] = checkpoint["quant_conv.bias"]
375
- if "quantize.embedding.weight" in checkpoint:
376
- new_checkpoint["quantize.embedding.weight"] = checkpoint["quantize.embedding.weight"]
377
- new_checkpoint["post_quant_conv.weight"] = checkpoint["post_quant_conv.weight"]
378
- new_checkpoint["post_quant_conv.bias"] = checkpoint["post_quant_conv.bias"]
379
-
380
- return new_checkpoint
381
-
382
-
383
- if __name__ == "__main__":
384
- parser = argparse.ArgumentParser()
385
-
386
- parser.add_argument(
387
- "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
388
- )
389
-
390
- parser.add_argument(
391
- "--config_file",
392
- default=None,
393
- type=str,
394
- required=True,
395
- help="The config json file corresponding to the architecture.",
396
- )
397
-
398
- parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
399
-
400
- args = parser.parse_args()
401
- checkpoint = torch.load(args.checkpoint_path)
402
-
403
- with open(args.config_file) as f:
404
- config = json.loads(f.read())
405
-
406
- # unet case
407
- key_prefix_set = {key.split(".")[0] for key in checkpoint.keys()}
408
- if "encoder" in key_prefix_set and "decoder" in key_prefix_set:
409
- converted_checkpoint = convert_vq_autoenc_checkpoint(checkpoint, config)
410
- else:
411
- converted_checkpoint = convert_ddpm_checkpoint(checkpoint, config)
412
-
413
- if "ddpm" in config:
414
- del config["ddpm"]
415
-
416
- if config["_class_name"] == "VQModel":
417
- model = VQModel(**config)
418
- model.load_state_dict(converted_checkpoint)
419
- model.save_pretrained(args.dump_path)
420
- elif config["_class_name"] == "AutoencoderKL":
421
- model = AutoencoderKL(**config)
422
- model.load_state_dict(converted_checkpoint)
423
- model.save_pretrained(args.dump_path)
424
- else:
425
- model = UNet2DModel(**config)
426
- model.load_state_dict(converted_checkpoint)
427
-
428
- scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
429
-
430
- pipe = DDPMPipeline(unet=model, scheduler=scheduler)
431
- pipe.save_pretrained(args.dump_path)
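
For reference, a minimal sketch of how a script like the one above might be invoked and its output reloaded. The paths are placeholders and `diffusers` must be installed; this is not part of the deleted file, only an illustration of the conversion flow it implements.

```python
# Hypothetical invocation (paths are placeholders):
#   python convert_ddpm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ddpm_ckpt/model.ckpt \
#       --config_file ./ddpm_ckpt/config.json \
#       --dump_path ./converted_model

# Reloading the converted output, assuming the checkpoint was a DDPM UNet:
from diffusers import DDPMPipeline

pipe = DDPMPipeline.from_pretrained("./converted_model")
image = pipe(num_inference_steps=50).images[0]  # run a short sampling loop
image.save("sample.png")
```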
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/faster_rcnn.py DELETED
@@ -1,24 +0,0 @@
1
- from ..builder import DETECTORS
2
- from .two_stage import TwoStageDetector
3
-
4
-
5
- @DETECTORS.register_module()
6
- class FasterRCNN(TwoStageDetector):
7
- """Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
8
-
9
- def __init__(self,
10
- backbone,
11
- rpn_head,
12
- roi_head,
13
- train_cfg,
14
- test_cfg,
15
- neck=None,
16
- pretrained=None):
17
- super(FasterRCNN, self).__init__(
18
- backbone=backbone,
19
- neck=neck,
20
- rpn_head=rpn_head,
21
- roi_head=roi_head,
22
- train_cfg=train_cfg,
23
- test_cfg=test_cfg,
24
- pretrained=pretrained)
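
Since the class above only forwards its arguments to `TwoStageDetector` and registers itself in the `DETECTORS` registry, the usual entry point is a config-driven build. A rough sketch, assuming mmdetection and mmcv are installed and a standard Faster R-CNN config file is available locally (the config path is a placeholder):

```python
from mmcv import Config
from mmdet.models import build_detector

# Placeholder path: any Faster R-CNN config shipped with mmdetection works here.
cfg = Config.fromfile("configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py")

# The registry resolves type='FasterRCNN' in the config to the class defined above.
model = build_detector(cfg.model,
                       train_cfg=cfg.get("train_cfg"),
                       test_cfg=cfg.get("test_cfg"))
print(type(model).__name__)  # FasterRCNN
```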
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/options/train_options.py DELETED
@@ -1,30 +0,0 @@
1
- from .base_options import BaseOptions
2
-
3
-
4
- class TrainOptions(BaseOptions):
5
- def initialize(self, parser):
6
- parser = BaseOptions.initialize(self, parser)
7
-
8
- # training parameters
9
- parser.add_argument('--iter_count', type=int, default=0, help='the starting iteration count')
10
- parser.add_argument('--n_iter', type=int, default=20000000, help='# of iterations at the initial learning rate')
11
- parser.add_argument('--n_iter_decay', type=int, default=0, help='# of iterations over which to decay the learning rate to zero')
12
- parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
13
- # learning rate and loss weight
14
- parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy [lambda|step|plateau]')
15
- parser.add_argument('--lr', type=float, default=1e-4, help='initial learning rate for adam')
16
- parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
17
- parser.add_argument('--beta2', type=float, default=0.9, help='momentum term of adam')
18
- parser.add_argument('--gan_mode', type=str, default='nonsaturating', choices=['hinge', 'lsgan', 'standard', 'wgan-gp', 'nonsaturating'])
19
- # display the results
20
- parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results on screen')
21
- parser.add_argument('--display_ncols', type=int, default=3, help='if positive, display all examples in a single visdom web panel with certain number of examples per row.')
22
- parser.add_argument('--print_freq', type=int, default=1000, help='frequency of showing training results on console')
23
- parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
24
- parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
25
- parser.add_argument('--save_iters_freq', type=int, default=100000, help='frequency of saving checkpoints at the end of epochs')
26
- parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results')
27
-
28
- self.isTrain = True
29
-
30
- return parser
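
A hypothetical usage sketch for the options class above. It assumes `BaseOptions` exposes the usual `parse()` helper found in this family of codebases and that the module is importable as `options.train_options`; both are assumptions, not shown in the deleted file.

```python
from options.train_options import TrainOptions

# Parses sys.argv using the arguments registered above and marks training mode.
opt = TrainOptions().parse()
print(opt.lr, opt.gan_mode, opt.isTrain)  # e.g. 0.0001 nonsaturating True
```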
 
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/save.py DELETED
@@ -1,29 +0,0 @@
1
- from fastai.basic_train import Learner, LearnerCallback
2
- from fastai.vision.gan import GANLearner
3
-
4
-
5
- class GANSaveCallback(LearnerCallback):
6
- """A `LearnerCallback` that saves history of metrics while training `learn` into CSV `filename`."""
7
-
8
- def __init__(
9
- self,
10
- learn: GANLearner,
11
- learn_gen: Learner,
12
- filename: str,
13
- save_iters: int = 1000,
14
- ):
15
- super().__init__(learn)
16
- self.learn_gen = learn_gen
17
- self.filename = filename
18
- self.save_iters = save_iters
19
-
20
- def on_batch_end(self, iteration: int, epoch: int, **kwargs) -> None:
21
- if iteration == 0:
22
- return
23
-
24
- if iteration % self.save_iters == 0:
25
- self._save_gen_learner(iteration=iteration, epoch=epoch)
26
-
27
- def _save_gen_learner(self, iteration: int, epoch: int):
28
- filename = '{}_{}_{}'.format(self.filename, epoch, iteration)
29
- self.learn_gen.save(filename)
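
A hypothetical wiring sketch for the callback above, assuming a fastai v1 `GANLearner` (`learn`) and a separate generator `Learner` (`learn_gen`) already exist; the import path and filename are placeholders.

```python
from deoldify.save import GANSaveCallback  # placeholder import path for this repo layout

callback = GANSaveCallback(learn, learn_gen, filename='gen_checkpoint', save_iters=1000)
learn.callbacks.append(callback)  # fastai v1 Learner keeps a plain list of callbacks
learn.fit(1, lr=1e-4)             # checkpoints land in learn_gen's models/ directory
```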
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/package_finder.py DELETED
@@ -1,1029 +0,0 @@
1
- """Routines related to PyPI, indexes"""
2
-
3
- import enum
4
- import functools
5
- import itertools
6
- import logging
7
- import re
8
- from typing import TYPE_CHECKING, FrozenSet, Iterable, List, Optional, Set, Tuple, Union
9
-
10
- from pip._vendor.packaging import specifiers
11
- from pip._vendor.packaging.tags import Tag
12
- from pip._vendor.packaging.utils import canonicalize_name
13
- from pip._vendor.packaging.version import _BaseVersion
14
- from pip._vendor.packaging.version import parse as parse_version
15
-
16
- from pip._internal.exceptions import (
17
- BestVersionAlreadyInstalled,
18
- DistributionNotFound,
19
- InvalidWheelFilename,
20
- UnsupportedWheel,
21
- )
22
- from pip._internal.index.collector import LinkCollector, parse_links
23
- from pip._internal.models.candidate import InstallationCandidate
24
- from pip._internal.models.format_control import FormatControl
25
- from pip._internal.models.link import Link
26
- from pip._internal.models.search_scope import SearchScope
27
- from pip._internal.models.selection_prefs import SelectionPreferences
28
- from pip._internal.models.target_python import TargetPython
29
- from pip._internal.models.wheel import Wheel
30
- from pip._internal.req import InstallRequirement
31
- from pip._internal.utils._log import getLogger
32
- from pip._internal.utils.filetypes import WHEEL_EXTENSION
33
- from pip._internal.utils.hashes import Hashes
34
- from pip._internal.utils.logging import indent_log
35
- from pip._internal.utils.misc import build_netloc
36
- from pip._internal.utils.packaging import check_requires_python
37
- from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
38
-
39
- if TYPE_CHECKING:
40
- from pip._vendor.typing_extensions import TypeGuard
41
-
42
- __all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]
43
-
44
-
45
- logger = getLogger(__name__)
46
-
47
- BuildTag = Union[Tuple[()], Tuple[int, str]]
48
- CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
49
-
50
-
51
- def _check_link_requires_python(
52
- link: Link,
53
- version_info: Tuple[int, int, int],
54
- ignore_requires_python: bool = False,
55
- ) -> bool:
56
- """
57
- Return whether the given Python version is compatible with a link's
58
- "Requires-Python" value.
59
-
60
- :param version_info: A 3-tuple of ints representing the Python
61
- major-minor-micro version to check.
62
- :param ignore_requires_python: Whether to ignore the "Requires-Python"
63
- value if the given Python version isn't compatible.
64
- """
65
- try:
66
- is_compatible = check_requires_python(
67
- link.requires_python,
68
- version_info=version_info,
69
- )
70
- except specifiers.InvalidSpecifier:
71
- logger.debug(
72
- "Ignoring invalid Requires-Python (%r) for link: %s",
73
- link.requires_python,
74
- link,
75
- )
76
- else:
77
- if not is_compatible:
78
- version = ".".join(map(str, version_info))
79
- if not ignore_requires_python:
80
- logger.verbose(
81
- "Link requires a different Python (%s not in: %r): %s",
82
- version,
83
- link.requires_python,
84
- link,
85
- )
86
- return False
87
-
88
- logger.debug(
89
- "Ignoring failed Requires-Python check (%s not in: %r) for link: %s",
90
- version,
91
- link.requires_python,
92
- link,
93
- )
94
-
95
- return True
96
-
97
-
98
- class LinkType(enum.Enum):
99
- candidate = enum.auto()
100
- different_project = enum.auto()
101
- yanked = enum.auto()
102
- format_unsupported = enum.auto()
103
- format_invalid = enum.auto()
104
- platform_mismatch = enum.auto()
105
- requires_python_mismatch = enum.auto()
106
-
107
-
108
- class LinkEvaluator:
109
-
110
- """
111
- Responsible for evaluating links for a particular project.
112
- """
113
-
114
- _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")
115
-
116
- # Don't include an allow_yanked default value to make sure each call
117
- # site considers whether yanked releases are allowed. This also causes
118
- # that decision to be made explicit in the calling code, which helps
119
- # people when reading the code.
120
- def __init__(
121
- self,
122
- project_name: str,
123
- canonical_name: str,
124
- formats: FrozenSet[str],
125
- target_python: TargetPython,
126
- allow_yanked: bool,
127
- ignore_requires_python: Optional[bool] = None,
128
- ) -> None:
129
- """
130
- :param project_name: The user supplied package name.
131
- :param canonical_name: The canonical package name.
132
- :param formats: The formats allowed for this package. Should be a set
133
- with 'binary' or 'source' or both in it.
134
- :param target_python: The target Python interpreter to use when
135
- evaluating link compatibility. This is used, for example, to
136
- check wheel compatibility, as well as when checking the Python
137
- version, e.g. the Python version embedded in a link filename
138
- (or egg fragment) and against an HTML link's optional PEP 503
139
- "data-requires-python" attribute.
140
- :param allow_yanked: Whether files marked as yanked (in the sense
141
- of PEP 592) are permitted to be candidates for install.
142
- :param ignore_requires_python: Whether to ignore incompatible
143
- PEP 503 "data-requires-python" values in HTML links. Defaults
144
- to False.
145
- """
146
- if ignore_requires_python is None:
147
- ignore_requires_python = False
148
-
149
- self._allow_yanked = allow_yanked
150
- self._canonical_name = canonical_name
151
- self._ignore_requires_python = ignore_requires_python
152
- self._formats = formats
153
- self._target_python = target_python
154
-
155
- self.project_name = project_name
156
-
157
- def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
158
- """
159
- Determine whether a link is a candidate for installation.
160
-
161
- :return: A tuple (result, detail), where *result* is an enum
162
- representing whether the evaluation found a candidate, or the reason
163
- why one is not found. If a candidate is found, *detail* will be the
164
- candidate's version string; if one is not found, it contains the
165
- reason the link fails to qualify.
166
- """
167
- version = None
168
- if link.is_yanked and not self._allow_yanked:
169
- reason = link.yanked_reason or "<none given>"
170
- return (LinkType.yanked, f"yanked for reason: {reason}")
171
-
172
- if link.egg_fragment:
173
- egg_info = link.egg_fragment
174
- ext = link.ext
175
- else:
176
- egg_info, ext = link.splitext()
177
- if not ext:
178
- return (LinkType.format_unsupported, "not a file")
179
- if ext not in SUPPORTED_EXTENSIONS:
180
- return (
181
- LinkType.format_unsupported,
182
- f"unsupported archive format: {ext}",
183
- )
184
- if "binary" not in self._formats and ext == WHEEL_EXTENSION:
185
- reason = f"No binaries permitted for {self.project_name}"
186
- return (LinkType.format_unsupported, reason)
187
- if "macosx10" in link.path and ext == ".zip":
188
- return (LinkType.format_unsupported, "macosx10 one")
189
- if ext == WHEEL_EXTENSION:
190
- try:
191
- wheel = Wheel(link.filename)
192
- except InvalidWheelFilename:
193
- return (
194
- LinkType.format_invalid,
195
- "invalid wheel filename",
196
- )
197
- if canonicalize_name(wheel.name) != self._canonical_name:
198
- reason = f"wrong project name (not {self.project_name})"
199
- return (LinkType.different_project, reason)
200
-
201
- supported_tags = self._target_python.get_tags()
202
- if not wheel.supported(supported_tags):
203
- # Include the wheel's tags in the reason string to
204
- # simplify troubleshooting compatibility issues.
205
- file_tags = ", ".join(wheel.get_formatted_file_tags())
206
- reason = (
207
- f"none of the wheel's tags ({file_tags}) are compatible "
208
- f"(run pip debug --verbose to show compatible tags)"
209
- )
210
- return (LinkType.platform_mismatch, reason)
211
-
212
- version = wheel.version
213
-
214
- # This should be up by the self.ok_binary check, but see issue 2700.
215
- if "source" not in self._formats and ext != WHEEL_EXTENSION:
216
- reason = f"No sources permitted for {self.project_name}"
217
- return (LinkType.format_unsupported, reason)
218
-
219
- if not version:
220
- version = _extract_version_from_fragment(
221
- egg_info,
222
- self._canonical_name,
223
- )
224
- if not version:
225
- reason = f"Missing project version for {self.project_name}"
226
- return (LinkType.format_invalid, reason)
227
-
228
- match = self._py_version_re.search(version)
229
- if match:
230
- version = version[: match.start()]
231
- py_version = match.group(1)
232
- if py_version != self._target_python.py_version:
233
- return (
234
- LinkType.platform_mismatch,
235
- "Python version is incorrect",
236
- )
237
-
238
- supports_python = _check_link_requires_python(
239
- link,
240
- version_info=self._target_python.py_version_info,
241
- ignore_requires_python=self._ignore_requires_python,
242
- )
243
- if not supports_python:
244
- reason = f"{version} Requires-Python {link.requires_python}"
245
- return (LinkType.requires_python_mismatch, reason)
246
-
247
- logger.debug("Found link %s, version: %s", link, version)
248
-
249
- return (LinkType.candidate, version)
250
-
251
-
252
- def filter_unallowed_hashes(
253
- candidates: List[InstallationCandidate],
254
- hashes: Optional[Hashes],
255
- project_name: str,
256
- ) -> List[InstallationCandidate]:
257
- """
258
- Filter out candidates whose hashes aren't allowed, and return a new
259
- list of candidates.
260
-
261
- If at least one candidate has an allowed hash, then all candidates with
262
- either an allowed hash or no hash specified are returned. Otherwise,
263
- the given candidates are returned.
264
-
265
- Including the candidates with no hash specified when there is a match
266
- allows a warning to be logged if there is a more preferred candidate
267
- with no hash specified. Returning all candidates in the case of no
268
- matches lets pip report the hash of the candidate that would otherwise
269
- have been installed (e.g. permitting the user to more easily update
270
- their requirements file with the desired hash).
271
- """
272
- if not hashes:
273
- logger.debug(
274
- "Given no hashes to check %s links for project %r: "
275
- "discarding no candidates",
276
- len(candidates),
277
- project_name,
278
- )
279
- # Make sure we're not returning back the given value.
280
- return list(candidates)
281
-
282
- matches_or_no_digest = []
283
- # Collect the non-matches for logging purposes.
284
- non_matches = []
285
- match_count = 0
286
- for candidate in candidates:
287
- link = candidate.link
288
- if not link.has_hash:
289
- pass
290
- elif link.is_hash_allowed(hashes=hashes):
291
- match_count += 1
292
- else:
293
- non_matches.append(candidate)
294
- continue
295
-
296
- matches_or_no_digest.append(candidate)
297
-
298
- if match_count:
299
- filtered = matches_or_no_digest
300
- else:
301
- # Make sure we're not returning back the given value.
302
- filtered = list(candidates)
303
-
304
- if len(filtered) == len(candidates):
305
- discard_message = "discarding no candidates"
306
- else:
307
- discard_message = "discarding {} non-matches:\n {}".format(
308
- len(non_matches),
309
- "\n ".join(str(candidate.link) for candidate in non_matches),
310
- )
311
-
312
- logger.debug(
313
- "Checked %s links for project %r against %s hashes "
314
- "(%s matches, %s no digest): %s",
315
- len(candidates),
316
- project_name,
317
- hashes.digest_count,
318
- match_count,
319
- len(matches_or_no_digest) - match_count,
320
- discard_message,
321
- )
322
-
323
- return filtered
324
-
325
-
326
- class CandidatePreferences:
327
-
328
- """
329
- Encapsulates some of the preferences for filtering and sorting
330
- InstallationCandidate objects.
331
- """
332
-
333
- def __init__(
334
- self,
335
- prefer_binary: bool = False,
336
- allow_all_prereleases: bool = False,
337
- ) -> None:
338
- """
339
- :param allow_all_prereleases: Whether to allow all pre-releases.
340
- """
341
- self.allow_all_prereleases = allow_all_prereleases
342
- self.prefer_binary = prefer_binary
343
-
344
-
345
- class BestCandidateResult:
346
- """A collection of candidates, returned by `PackageFinder.find_best_candidate`.
347
-
348
- This class is only intended to be instantiated by CandidateEvaluator's
349
- `compute_best_candidate()` method.
350
- """
351
-
352
- def __init__(
353
- self,
354
- candidates: List[InstallationCandidate],
355
- applicable_candidates: List[InstallationCandidate],
356
- best_candidate: Optional[InstallationCandidate],
357
- ) -> None:
358
- """
359
- :param candidates: A sequence of all available candidates found.
360
- :param applicable_candidates: The applicable candidates.
361
- :param best_candidate: The most preferred candidate found, or None
362
- if no applicable candidates were found.
363
- """
364
- assert set(applicable_candidates) <= set(candidates)
365
-
366
- if best_candidate is None:
367
- assert not applicable_candidates
368
- else:
369
- assert best_candidate in applicable_candidates
370
-
371
- self._applicable_candidates = applicable_candidates
372
- self._candidates = candidates
373
-
374
- self.best_candidate = best_candidate
375
-
376
- def iter_all(self) -> Iterable[InstallationCandidate]:
377
- """Iterate through all candidates."""
378
- return iter(self._candidates)
379
-
380
- def iter_applicable(self) -> Iterable[InstallationCandidate]:
381
- """Iterate through the applicable candidates."""
382
- return iter(self._applicable_candidates)
383
-
384
-
385
- class CandidateEvaluator:
386
-
387
- """
388
- Responsible for filtering and sorting candidates for installation based
389
- on what tags are valid.
390
- """
391
-
392
- @classmethod
393
- def create(
394
- cls,
395
- project_name: str,
396
- target_python: Optional[TargetPython] = None,
397
- prefer_binary: bool = False,
398
- allow_all_prereleases: bool = False,
399
- specifier: Optional[specifiers.BaseSpecifier] = None,
400
- hashes: Optional[Hashes] = None,
401
- ) -> "CandidateEvaluator":
402
- """Create a CandidateEvaluator object.
403
-
404
- :param target_python: The target Python interpreter to use when
405
- checking compatibility. If None (the default), a TargetPython
406
- object will be constructed from the running Python.
407
- :param specifier: An optional object implementing `filter`
408
- (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
409
- versions.
410
- :param hashes: An optional collection of allowed hashes.
411
- """
412
- if target_python is None:
413
- target_python = TargetPython()
414
- if specifier is None:
415
- specifier = specifiers.SpecifierSet()
416
-
417
- supported_tags = target_python.get_tags()
418
-
419
- return cls(
420
- project_name=project_name,
421
- supported_tags=supported_tags,
422
- specifier=specifier,
423
- prefer_binary=prefer_binary,
424
- allow_all_prereleases=allow_all_prereleases,
425
- hashes=hashes,
426
- )
427
-
428
- def __init__(
429
- self,
430
- project_name: str,
431
- supported_tags: List[Tag],
432
- specifier: specifiers.BaseSpecifier,
433
- prefer_binary: bool = False,
434
- allow_all_prereleases: bool = False,
435
- hashes: Optional[Hashes] = None,
436
- ) -> None:
437
- """
438
- :param supported_tags: The PEP 425 tags supported by the target
439
- Python in order of preference (most preferred first).
440
- """
441
- self._allow_all_prereleases = allow_all_prereleases
442
- self._hashes = hashes
443
- self._prefer_binary = prefer_binary
444
- self._project_name = project_name
445
- self._specifier = specifier
446
- self._supported_tags = supported_tags
447
- # Since the index of the tag in the _supported_tags list is used
448
- # as a priority, precompute a map from tag to index/priority to be
449
- # used in wheel.find_most_preferred_tag.
450
- self._wheel_tag_preferences = {
451
- tag: idx for idx, tag in enumerate(supported_tags)
452
- }
453
-
454
- def get_applicable_candidates(
455
- self,
456
- candidates: List[InstallationCandidate],
457
- ) -> List[InstallationCandidate]:
458
- """
459
- Return the applicable candidates from a list of candidates.
460
- """
461
- # Using None infers from the specifier instead.
462
- allow_prereleases = self._allow_all_prereleases or None
463
- specifier = self._specifier
464
- versions = {
465
- str(v)
466
- for v in specifier.filter(
467
- # We turn the version object into a str here because otherwise
468
- # when we're debundled but setuptools isn't, Python will see
469
- # packaging.version.Version and
470
- # pkg_resources._vendor.packaging.version.Version as different
471
- # types. This way we'll use a str as a common data interchange
472
- # format. If we stop using the pkg_resources provided specifier
473
- # and start using our own, we can drop the cast to str().
474
- (str(c.version) for c in candidates),
475
- prereleases=allow_prereleases,
476
- )
477
- }
478
-
479
- # Again, converting version to str to deal with debundling.
480
- applicable_candidates = [c for c in candidates if str(c.version) in versions]
481
-
482
- filtered_applicable_candidates = filter_unallowed_hashes(
483
- candidates=applicable_candidates,
484
- hashes=self._hashes,
485
- project_name=self._project_name,
486
- )
487
-
488
- return sorted(filtered_applicable_candidates, key=self._sort_key)
489
-
490
- def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
491
- """
492
- Function to pass as the `key` argument to a call to sorted() to sort
493
- InstallationCandidates by preference.
494
-
495
- Returns a tuple such that tuples sorting as greater using Python's
496
- default comparison operator are more preferred.
497
-
498
- The preference is as follows:
499
-
500
- First and foremost, candidates with allowed (matching) hashes are
501
- always preferred over candidates without matching hashes. This is
502
- because e.g. if the only candidate with an allowed hash is yanked,
503
- we still want to use that candidate.
504
-
505
- Second, excepting hash considerations, candidates that have been
506
- yanked (in the sense of PEP 592) are always less preferred than
507
- candidates that haven't been yanked. Then:
508
-
509
- If not finding wheels, they are sorted by version only.
510
- If finding wheels, then the sort order is by version, then:
511
- 1. existing installs
512
- 2. wheels ordered via Wheel.support_index_min(self._supported_tags)
513
- 3. source archives
514
- If prefer_binary was set, then all wheels are sorted above sources.
515
-
516
- Note: it was considered to embed this logic into the Link
517
- comparison operators, but then different sdist links
518
- with the same version, would have to be considered equal
519
- """
520
- valid_tags = self._supported_tags
521
- support_num = len(valid_tags)
522
- build_tag: BuildTag = ()
523
- binary_preference = 0
524
- link = candidate.link
525
- if link.is_wheel:
526
- # can raise InvalidWheelFilename
527
- wheel = Wheel(link.filename)
528
- try:
529
- pri = -(
530
- wheel.find_most_preferred_tag(
531
- valid_tags, self._wheel_tag_preferences
532
- )
533
- )
534
- except ValueError:
535
- raise UnsupportedWheel(
536
- "{} is not a supported wheel for this platform. It "
537
- "can't be sorted.".format(wheel.filename)
538
- )
539
- if self._prefer_binary:
540
- binary_preference = 1
541
- if wheel.build_tag is not None:
542
- match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
543
- assert match is not None, "guaranteed by filename validation"
544
- build_tag_groups = match.groups()
545
- build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
546
- else: # sdist
547
- pri = -(support_num)
548
- has_allowed_hash = int(link.is_hash_allowed(self._hashes))
549
- yank_value = -1 * int(link.is_yanked) # -1 for yanked.
550
- return (
551
- has_allowed_hash,
552
- yank_value,
553
- binary_preference,
554
- candidate.version,
555
- pri,
556
- build_tag,
557
- )
558
-
559
- def sort_best_candidate(
560
- self,
561
- candidates: List[InstallationCandidate],
562
- ) -> Optional[InstallationCandidate]:
563
- """
564
- Return the best candidate per the instance's sort order, or None if
565
- no candidate is acceptable.
566
- """
567
- if not candidates:
568
- return None
569
- best_candidate = max(candidates, key=self._sort_key)
570
- return best_candidate
571
-
572
- def compute_best_candidate(
573
- self,
574
- candidates: List[InstallationCandidate],
575
- ) -> BestCandidateResult:
576
- """
577
- Compute and return a `BestCandidateResult` instance.
578
- """
579
- applicable_candidates = self.get_applicable_candidates(candidates)
580
-
581
- best_candidate = self.sort_best_candidate(applicable_candidates)
582
-
583
- return BestCandidateResult(
584
- candidates,
585
- applicable_candidates=applicable_candidates,
586
- best_candidate=best_candidate,
587
- )
588
-
589
-
590
- class PackageFinder:
591
- """This finds packages.
592
-
593
- This is meant to match easy_install's technique for looking for
594
- packages, by reading pages and looking for appropriate links.
595
- """
596
-
597
- def __init__(
598
- self,
599
- link_collector: LinkCollector,
600
- target_python: TargetPython,
601
- allow_yanked: bool,
602
- format_control: Optional[FormatControl] = None,
603
- candidate_prefs: Optional[CandidatePreferences] = None,
604
- ignore_requires_python: Optional[bool] = None,
605
- ) -> None:
606
- """
607
- This constructor is primarily meant to be used by the create() class
608
- method and from tests.
609
-
610
- :param format_control: A FormatControl object, used to control
611
- the selection of source packages / binary packages when consulting
612
- the index and links.
613
- :param candidate_prefs: Options to use when creating a
614
- CandidateEvaluator object.
615
- """
616
- if candidate_prefs is None:
617
- candidate_prefs = CandidatePreferences()
618
-
619
- format_control = format_control or FormatControl(set(), set())
620
-
621
- self._allow_yanked = allow_yanked
622
- self._candidate_prefs = candidate_prefs
623
- self._ignore_requires_python = ignore_requires_python
624
- self._link_collector = link_collector
625
- self._target_python = target_python
626
-
627
- self.format_control = format_control
628
-
629
- # These are boring links that have already been logged somehow.
630
- self._logged_links: Set[Tuple[Link, LinkType, str]] = set()
631
-
632
- # Don't include an allow_yanked default value to make sure each call
633
- # site considers whether yanked releases are allowed. This also causes
634
- # that decision to be made explicit in the calling code, which helps
635
- # people when reading the code.
636
- @classmethod
637
- def create(
638
- cls,
639
- link_collector: LinkCollector,
640
- selection_prefs: SelectionPreferences,
641
- target_python: Optional[TargetPython] = None,
642
- ) -> "PackageFinder":
643
- """Create a PackageFinder.
644
-
645
- :param selection_prefs: The candidate selection preferences, as a
646
- SelectionPreferences object.
647
- :param target_python: The target Python interpreter to use when
648
- checking compatibility. If None (the default), a TargetPython
649
- object will be constructed from the running Python.
650
- """
651
- if target_python is None:
652
- target_python = TargetPython()
653
-
654
- candidate_prefs = CandidatePreferences(
655
- prefer_binary=selection_prefs.prefer_binary,
656
- allow_all_prereleases=selection_prefs.allow_all_prereleases,
657
- )
658
-
659
- return cls(
660
- candidate_prefs=candidate_prefs,
661
- link_collector=link_collector,
662
- target_python=target_python,
663
- allow_yanked=selection_prefs.allow_yanked,
664
- format_control=selection_prefs.format_control,
665
- ignore_requires_python=selection_prefs.ignore_requires_python,
666
- )
667
-
668
- @property
669
- def target_python(self) -> TargetPython:
670
- return self._target_python
671
-
672
- @property
673
- def search_scope(self) -> SearchScope:
674
- return self._link_collector.search_scope
675
-
676
- @search_scope.setter
677
- def search_scope(self, search_scope: SearchScope) -> None:
678
- self._link_collector.search_scope = search_scope
679
-
680
- @property
681
- def find_links(self) -> List[str]:
682
- return self._link_collector.find_links
683
-
684
- @property
685
- def index_urls(self) -> List[str]:
686
- return self.search_scope.index_urls
687
-
688
- @property
689
- def trusted_hosts(self) -> Iterable[str]:
690
- for host_port in self._link_collector.session.pip_trusted_origins:
691
- yield build_netloc(*host_port)
692
-
693
- @property
694
- def allow_all_prereleases(self) -> bool:
695
- return self._candidate_prefs.allow_all_prereleases
696
-
697
- def set_allow_all_prereleases(self) -> None:
698
- self._candidate_prefs.allow_all_prereleases = True
699
-
700
- @property
701
- def prefer_binary(self) -> bool:
702
- return self._candidate_prefs.prefer_binary
703
-
704
- def set_prefer_binary(self) -> None:
705
- self._candidate_prefs.prefer_binary = True
706
-
707
- def requires_python_skipped_reasons(self) -> List[str]:
708
- reasons = {
709
- detail
710
- for _, result, detail in self._logged_links
711
- if result == LinkType.requires_python_mismatch
712
- }
713
- return sorted(reasons)
714
-
715
- def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
716
- canonical_name = canonicalize_name(project_name)
717
- formats = self.format_control.get_allowed_formats(canonical_name)
718
-
719
- return LinkEvaluator(
720
- project_name=project_name,
721
- canonical_name=canonical_name,
722
- formats=formats,
723
- target_python=self._target_python,
724
- allow_yanked=self._allow_yanked,
725
- ignore_requires_python=self._ignore_requires_python,
726
- )
727
-
728
- def _sort_links(self, links: Iterable[Link]) -> List[Link]:
729
- """
730
- Returns elements of links in order, non-egg links first, egg links
731
- second, while eliminating duplicates
732
- """
733
- eggs, no_eggs = [], []
734
- seen: Set[Link] = set()
735
- for link in links:
736
- if link not in seen:
737
- seen.add(link)
738
- if link.egg_fragment:
739
- eggs.append(link)
740
- else:
741
- no_eggs.append(link)
742
- return no_eggs + eggs
743
-
744
- def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None:
745
- entry = (link, result, detail)
746
- if entry not in self._logged_links:
747
- # Put the link at the end so the reason is more visible and because
748
- # the link string is usually very long.
749
- logger.debug("Skipping link: %s: %s", detail, link)
750
- self._logged_links.add(entry)
751
-
752
- def get_install_candidate(
753
- self, link_evaluator: LinkEvaluator, link: Link
754
- ) -> Optional[InstallationCandidate]:
755
- """
756
- If the link is a candidate for install, convert it to an
757
- InstallationCandidate and return it. Otherwise, return None.
758
- """
759
- result, detail = link_evaluator.evaluate_link(link)
760
- if result != LinkType.candidate:
761
- self._log_skipped_link(link, result, detail)
762
- return None
763
-
764
- return InstallationCandidate(
765
- name=link_evaluator.project_name,
766
- link=link,
767
- version=detail,
768
- )
769
-
770
- def evaluate_links(
771
- self, link_evaluator: LinkEvaluator, links: Iterable[Link]
772
- ) -> List[InstallationCandidate]:
773
- """
774
- Convert links that are candidates to InstallationCandidate objects.
775
- """
776
- candidates = []
777
- for link in self._sort_links(links):
778
- candidate = self.get_install_candidate(link_evaluator, link)
779
- if candidate is not None:
780
- candidates.append(candidate)
781
-
782
- return candidates
783
-
784
- def process_project_url(
785
- self, project_url: Link, link_evaluator: LinkEvaluator
786
- ) -> List[InstallationCandidate]:
787
- logger.debug(
788
- "Fetching project page and analyzing links: %s",
789
- project_url,
790
- )
791
- index_response = self._link_collector.fetch_response(project_url)
792
- if index_response is None:
793
- return []
794
-
795
- page_links = list(parse_links(index_response))
796
-
797
- with indent_log():
798
- package_links = self.evaluate_links(
799
- link_evaluator,
800
- links=page_links,
801
- )
802
-
803
- return package_links
804
-
805
- @functools.lru_cache(maxsize=None)
806
- def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
807
- """Find all available InstallationCandidate for project_name
808
-
809
- This checks index_urls and find_links.
810
- All versions found are returned as an InstallationCandidate list.
811
-
812
- See LinkEvaluator.evaluate_link() for details on which files
813
- are accepted.
814
- """
815
- link_evaluator = self.make_link_evaluator(project_name)
816
-
817
- collected_sources = self._link_collector.collect_sources(
818
- project_name=project_name,
819
- candidates_from_page=functools.partial(
820
- self.process_project_url,
821
- link_evaluator=link_evaluator,
822
- ),
823
- )
824
-
825
- page_candidates_it = itertools.chain.from_iterable(
826
- source.page_candidates()
827
- for sources in collected_sources
828
- for source in sources
829
- if source is not None
830
- )
831
- page_candidates = list(page_candidates_it)
832
-
833
- file_links_it = itertools.chain.from_iterable(
834
- source.file_links()
835
- for sources in collected_sources
836
- for source in sources
837
- if source is not None
838
- )
839
- file_candidates = self.evaluate_links(
840
- link_evaluator,
841
- sorted(file_links_it, reverse=True),
842
- )
843
-
844
- if logger.isEnabledFor(logging.DEBUG) and file_candidates:
845
- paths = []
846
- for candidate in file_candidates:
847
- assert candidate.link.url # we need to have a URL
848
- try:
849
- paths.append(candidate.link.file_path)
850
- except Exception:
851
- paths.append(candidate.link.url) # it's not a local file
852
-
853
- logger.debug("Local files found: %s", ", ".join(paths))
854
-
855
- # This is an intentional priority ordering
856
- return file_candidates + page_candidates
857
-
858
- def make_candidate_evaluator(
859
- self,
860
- project_name: str,
861
- specifier: Optional[specifiers.BaseSpecifier] = None,
862
- hashes: Optional[Hashes] = None,
863
- ) -> CandidateEvaluator:
864
- """Create a CandidateEvaluator object to use."""
865
- candidate_prefs = self._candidate_prefs
866
- return CandidateEvaluator.create(
867
- project_name=project_name,
868
- target_python=self._target_python,
869
- prefer_binary=candidate_prefs.prefer_binary,
870
- allow_all_prereleases=candidate_prefs.allow_all_prereleases,
871
- specifier=specifier,
872
- hashes=hashes,
873
- )
874
-
875
- @functools.lru_cache(maxsize=None)
876
- def find_best_candidate(
877
- self,
878
- project_name: str,
879
- specifier: Optional[specifiers.BaseSpecifier] = None,
880
- hashes: Optional[Hashes] = None,
881
- ) -> BestCandidateResult:
882
- """Find matches for the given project and specifier.
883
-
884
- :param specifier: An optional object implementing `filter`
885
- (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
886
- versions.
887
-
888
- :return: A `BestCandidateResult` instance.
889
- """
890
- candidates = self.find_all_candidates(project_name)
891
- candidate_evaluator = self.make_candidate_evaluator(
892
- project_name=project_name,
893
- specifier=specifier,
894
- hashes=hashes,
895
- )
896
- return candidate_evaluator.compute_best_candidate(candidates)
897
-
898
- def find_requirement(
899
- self, req: InstallRequirement, upgrade: bool
900
- ) -> Optional[InstallationCandidate]:
901
- """Try to find a Link matching req
902
-
903
- Expects req, an InstallRequirement and upgrade, a boolean
904
- Returns an InstallationCandidate if found,
905
- Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
906
- """
907
- hashes = req.hashes(trust_internet=False)
908
- best_candidate_result = self.find_best_candidate(
909
- req.name,
910
- specifier=req.specifier,
911
- hashes=hashes,
912
- )
913
- best_candidate = best_candidate_result.best_candidate
914
-
915
- installed_version: Optional[_BaseVersion] = None
916
- if req.satisfied_by is not None:
917
- installed_version = req.satisfied_by.version
918
-
919
- def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
920
- # This repeated parse_version and str() conversion is needed to
921
- # handle different vendoring sources from pip and pkg_resources.
922
- # If we stop using the pkg_resources provided specifier and start
923
- # using our own, we can drop the cast to str().
924
- return (
925
- ", ".join(
926
- sorted(
927
- {str(c.version) for c in cand_iter},
928
- key=parse_version,
929
- )
930
- )
931
- or "none"
932
- )
933
-
934
- if installed_version is None and best_candidate is None:
935
- logger.critical(
936
- "Could not find a version that satisfies the requirement %s "
937
- "(from versions: %s)",
938
- req,
939
- _format_versions(best_candidate_result.iter_all()),
940
- )
941
-
942
- raise DistributionNotFound(
943
- "No matching distribution found for {}".format(req)
944
- )
945
-
946
- def _should_install_candidate(
947
- candidate: Optional[InstallationCandidate],
948
- ) -> "TypeGuard[InstallationCandidate]":
949
- if installed_version is None:
950
- return True
951
- if best_candidate is None:
952
- return False
953
- return best_candidate.version > installed_version
954
-
955
- if not upgrade and installed_version is not None:
956
- if _should_install_candidate(best_candidate):
957
- logger.debug(
958
- "Existing installed version (%s) satisfies requirement "
959
- "(most up-to-date version is %s)",
960
- installed_version,
961
- best_candidate.version,
962
- )
963
- else:
964
- logger.debug(
965
- "Existing installed version (%s) is most up-to-date and "
966
- "satisfies requirement",
967
- installed_version,
968
- )
969
- return None
970
-
971
- if _should_install_candidate(best_candidate):
972
- logger.debug(
973
- "Using version %s (newest of versions: %s)",
974
- best_candidate.version,
975
- _format_versions(best_candidate_result.iter_applicable()),
976
- )
977
- return best_candidate
978
-
979
- # We have an existing version, and it's the best version
980
- logger.debug(
981
- "Installed version (%s) is most up-to-date (past versions: %s)",
982
- installed_version,
983
- _format_versions(best_candidate_result.iter_applicable()),
984
- )
985
- raise BestVersionAlreadyInstalled
986
-
987
-
988
- def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
989
- """Find the separator's index based on the package's canonical name.
990
-
991
- :param fragment: A <package>+<version> filename "fragment" (stem) or
992
- egg fragment.
993
- :param canonical_name: The package's canonical name.
994
-
995
- This function is needed since the canonicalized name does not necessarily
996
- have the same length as the egg info's name part. An example::
997
-
998
- >>> fragment = 'foo__bar-1.0'
999
- >>> canonical_name = 'foo-bar'
1000
- >>> _find_name_version_sep(fragment, canonical_name)
1001
- 8
1002
- """
1003
- # Project name and version must be separated by one single dash. Find all
1004
- # occurrences of dashes; if the string in front of it matches the canonical
1005
- # name, this is the one separating the name and version parts.
1006
- for i, c in enumerate(fragment):
1007
- if c != "-":
1008
- continue
1009
- if canonicalize_name(fragment[:i]) == canonical_name:
1010
- return i
1011
- raise ValueError(f"{fragment} does not match {canonical_name}")
1012
-
1013
-
1014
- def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
1015
- """Parse the version string from a <package>+<version> filename
1016
- "fragment" (stem) or egg fragment.
1017
-
1018
- :param fragment: The string to parse. E.g. foo-2.1
1019
- :param canonical_name: The canonicalized name of the package this
1020
- belongs to.
1021
- """
1022
- try:
1023
- version_start = _find_name_version_sep(fragment, canonical_name) + 1
1024
- except ValueError:
1025
- return None
1026
- version = fragment[version_start:]
1027
- if not version:
1028
- return None
1029
- return version
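
To make the fragment-splitting behaviour at the end of the file concrete, here is a small self-contained sketch. It re-implements the dash-scanning idea with `packaging.utils.canonicalize_name` instead of importing pip internals (which are not a public API), so the helper name is made up for the example.

```python
from packaging.utils import canonicalize_name

def split_fragment(fragment: str, canonical_name: str):
    """Return (name, version) using the same dash-scanning idea as above."""
    for i, ch in enumerate(fragment):
        # The separator is the dash whose prefix canonicalizes to the project name.
        if ch == "-" and canonicalize_name(fragment[:i]) == canonical_name:
            return fragment[:i], fragment[i + 1:]
    raise ValueError(f"{fragment} does not match {canonical_name}")

print(split_fragment("foo__bar-1.0", "foo-bar"))   # ('foo__bar', '1.0')
print(split_fragment("my-pkg-2.3.1", "my-pkg"))    # ('my-pkg', '2.3.1')
```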
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/abc.py DELETED
@@ -1,33 +0,0 @@
1
- from abc import ABC
2
-
3
-
4
- class RichRenderable(ABC):
5
- """An abstract base class for Rich renderables.
6
-
7
- Note that there is no need to extend this class, the intended use is to check if an
8
- object supports the Rich renderable protocol. For example::
9
-
10
- if isinstance(my_object, RichRenderable):
11
- console.print(my_object)
12
-
13
- """
14
-
15
- @classmethod
16
- def __subclasshook__(cls, other: type) -> bool:
17
- """Check if this class supports the rich render protocol."""
18
- return hasattr(other, "__rich_console__") or hasattr(other, "__rich__")
19
-
20
-
21
- if __name__ == "__main__": # pragma: no cover
22
- from pip._vendor.rich.text import Text
23
-
24
- t = Text()
25
- print(isinstance(Text, RichRenderable))
26
- print(isinstance(t, RichRenderable))
27
-
28
- class Foo:
29
- pass
30
-
31
- f = Foo()
32
- print(isinstance(f, RichRenderable))
33
- print(isinstance("", RichRenderable))
 
spaces/Audio-AGI/WavJourney/scripts/kill_services.py DELETED
@@ -1,11 +0,0 @@
1
- import os
2
-
3
- # Read the port that the WavJourney service was started on
4
- service_port = os.environ.get('WAVJOURNEY_SERVICE_PORT')
5
-
6
- # Execute the commands
7
- os.system(f'kill $(lsof -t -i :{service_port})')
8
-
9
-
10
-
11
-
 
spaces/Audio-AGI/WavJourney/share_btn.py DELETED
@@ -1,74 +0,0 @@
1
- community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
2
- <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
3
- <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
4
- </svg>"""
5
-
6
- loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
7
- style="color: #ffffff;
8
- "
9
- xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
10
-
11
- share_js = """async () => {
12
- async function uploadFile(file){
13
- const UPLOAD_URL = 'https://huggingface.co/uploads';
14
- const response = await fetch(UPLOAD_URL, {
15
- method: 'POST',
16
- headers: {
17
- 'Content-Type': file.type,
18
- 'X-Requested-With': 'XMLHttpRequest',
19
- },
20
- body: file, /// <- File inherits from Blob
21
- });
22
- const url = await response.text();
23
- return url;
24
- }
25
- async function getInputVideoFile(videoEl){
26
- const res = await fetch(videoEl.src);
27
- const blob = await res.blob();
28
- const videoId = Date.now() % 200;
29
- const fileName = `sd-perception-${videoId}.mp4`;
30
- return new File([blob], fileName, { type: 'video/mp4' });
31
- }
32
-
33
- async function audioToBase64(audioFile) {
34
- return new Promise((resolve, reject) => {
35
- let reader = new FileReader();
36
- reader.readAsDataURL(audioFile);
37
- reader.onload = () => resolve(reader.result);
38
- reader.onerror = error => reject(error);
39
-
40
- });
41
- }
42
- const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app');
43
- const inputPromptEl = gradioEl.querySelector('#prompt-in textarea').value;
44
- const outputVideoEl = gradioEl.querySelector('#output-video video');
45
-
46
- let titleTxt = `WavJourney: ${inputPromptEl}`;
47
-
48
- const shareBtnEl = gradioEl.querySelector('#share-btn');
49
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
50
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
51
- if(!outputVideoEl){
52
- return;
53
- };
54
- shareBtnEl.style.pointerEvents = 'none';
55
- shareIconEl.style.display = 'none';
56
- loadingIconEl.style.removeProperty('display');
57
- const outputVideo = await getInputVideoFile(outputVideoEl);
58
- const urlOutputVideo = await uploadFile(outputVideo);
59
-
60
- const descriptionMd = `
61
- ##### ${inputPromptEl}
62
-
63
- ${urlOutputVideo}
64
- `;
65
- const params = new URLSearchParams({
66
- title: titleTxt,
67
- description: descriptionMd,
68
- });
69
- const paramsStr = params.toString();
70
- window.open(`https://huggingface.co/spaces/Audio-AGI/WavJourney/discussions/new?${paramsStr}`, '_blank');
71
- shareBtnEl.style.removeProperty('pointer-events');
72
- shareIconEl.style.removeProperty('display');
73
- loadingIconEl.style.display = 'none';
74
- }"""
 
spaces/Awesimo/jojogan/e4e/criteria/lpips/utils.py DELETED
@@ -1,30 +0,0 @@
1
- from collections import OrderedDict
2
-
3
- import torch
4
-
5
-
6
- def normalize_activation(x, eps=1e-10):
7
- norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
8
- return x / (norm_factor + eps)
9
-
10
-
11
- def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
12
- # build url
13
- url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
14
- + f'master/lpips/weights/v{version}/{net_type}.pth'
15
-
16
- # download
17
- old_state_dict = torch.hub.load_state_dict_from_url(
18
- url, progress=True,
19
- map_location=None if torch.cuda.is_available() else torch.device('cpu')
20
- )
21
-
22
- # rename keys
23
- new_state_dict = OrderedDict()
24
- for key, val in old_state_dict.items():
25
- new_key = key
26
- new_key = new_key.replace('lin', '')
27
- new_key = new_key.replace('model.', '')
28
- new_state_dict[new_key] = val
29
-
30
- return new_state_dict
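
The renaming loop above simply strips the `lin` and `model.` prefixes from the upstream LPIPS weight keys. A tiny self-contained illustration with dummy keys (the key names here are made up for the example):

```python
from collections import OrderedDict

old = OrderedDict([("lin0.model.1.weight", 1), ("lin4.model.1.weight", 2)])

new = OrderedDict()
for key, val in old.items():
    # Same two substitutions as get_state_dict() above.
    new[key.replace("lin", "").replace("model.", "")] = val

print(list(new))  # ['0.1.weight', '4.1.weight']
```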
 
spaces/Bart92/RVC_HF/julius/fftconv.py DELETED
@@ -1,183 +0,0 @@
1
- # File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
2
- # Author: adefossez, 2020
3
-
4
- """
5
- Implementation of a FFT based 1D convolution in PyTorch.
6
- While FFT is used in CUDNN for small kernel sizes, it is not the case for long ones, e.g. 512.
7
- This module implements an efficient FFT-based convolution for such long kernels. A typical
8
- application is for evaluating FIR filters with a long receptive field, typically
9
- evaluated with a stride of 1.
10
- """
11
- from typing import Optional
12
-
13
- import torch
14
- try:
15
- import torch.fft as new_fft
16
- except ImportError:
17
- new_fft = None # type: ignore
18
- from torch.nn import functional as F
19
-
20
- from .core import pad_to, unfold
21
- from .utils import simple_repr
22
-
23
-
24
- # This is quite verbose, but sadly needed to make TorchScript happy.
25
- def _new_rfft(x: torch.Tensor):
26
- z = new_fft.rfft(x, dim=-1)
27
- return torch.view_as_real(z)
28
-
29
-
30
- def _old_rfft(x: torch.Tensor):
31
- return torch.rfft(x, 1) # type: ignore
32
-
33
-
34
- def _old_irfft(x: torch.Tensor, length: int):
35
- result = torch.irfft(x, 1, signal_sizes=(length,)) # type: ignore
36
- return result
37
-
38
-
39
- def _new_irfft(x: torch.Tensor, length: int):
40
- x = torch.view_as_complex(x)
41
- return new_fft.irfft(x, length, dim=-1)
42
-
43
-
44
- if new_fft is None:
45
- _rfft = _old_rfft
46
- _irfft = _old_irfft
47
- else:
48
- _rfft = _new_rfft
49
- _irfft = _new_irfft
50
-
51
-
52
- def _compl_mul_conjugate(a: torch.Tensor, b: torch.Tensor):
53
- """
54
- Given a and b two tensors of dimension 4
55
- with the last dimension being the real and imaginary part,
56
- returns a multiplied by the conjugate of b, the multiplication
57
- being with respect to the second dimension.
58
-
59
- """
60
- # PyTorch 1.7 supports complex number, but not for all operations.
61
- # Once the support is widespread, this can likely go away.
62
-
63
- op = "bcft,dct->bdft"
64
- return torch.stack([
65
- torch.einsum(op, a[..., 0], b[..., 0]) + torch.einsum(op, a[..., 1], b[..., 1]),
66
- torch.einsum(op, a[..., 1], b[..., 0]) - torch.einsum(op, a[..., 0], b[..., 1])
67
- ],
68
- dim=-1)
69
-
70
-
71
- def fft_conv1d(
72
- input: torch.Tensor, weight: torch.Tensor,
73
- bias: Optional[torch.Tensor] = None, stride: int = 1, padding: int = 0,
74
- block_ratio: float = 5):
75
- """
76
- Same as `torch.nn.functional.conv1d` but using FFT for the convolution.
77
- Please check PyTorch documentation for more information.
78
-
79
- Args:
80
- input (Tensor): input signal of shape `[B, C, T]`.
81
- weight (Tensor): weight of the convolution `[D, C, K]` with `D` the number
82
- of output channels.
83
- bias (Tensor or None): if not None, bias term for the convolution.
84
- stride (int): stride of convolution.
85
- padding (int): padding to apply to the input.
86
- block_ratio (float): can be tuned for speed. The input is splitted in chunks
87
- with a size of `int(block_ratio * kernel_size)`.
88
-
89
- Shape:
90
-
91
- - Inputs: `input` is `[B, C, T]`, `weight` is `[D, C, K]` and bias is `[D]`.
92
- - Output: `(*, T)`
93
-
94
-
95
- ..note::
96
- This function is faster than `torch.nn.functional.conv1d` only in specific cases.
97
- Typically, the kernel size should be of the order of 256 to see any real gain,
98
- for a stride of 1.
99
-
100
- ..Warning::
101
- Dilation and groups are not supported at the moment. This function might use
102
- more memory than the default Conv1d implementation.
103
- """
104
- input = F.pad(input, (padding, padding))
105
- batch, channels, length = input.shape
106
- out_channels, _, kernel_size = weight.shape
107
-
108
- if length < kernel_size:
109
- raise RuntimeError(f"Input should be at least as large as the kernel size {kernel_size}, "
110
- f"but it is only {length} samples long.")
111
- if block_ratio < 1:
112
- raise RuntimeError("Block ratio must be greater than 1.")
113
-
114
- # We are going to process the input block by block, as for some reason it is faster
115
- # and less memory intensive (I think the culprit is `torch.einsum`).
116
- block_size: int = min(int(kernel_size * block_ratio), length)
117
- fold_stride = block_size - kernel_size + 1
118
- weight = pad_to(weight, block_size)
119
- weight_z = _rfft(weight)
120
-
121
- # We unfold the padded input into overlapping frames, on which the FFT convolution is applied.
122
- frames = unfold(input, block_size, fold_stride)
123
-
124
- frames_z = _rfft(frames)
125
- out_z = _compl_mul_conjugate(frames_z, weight_z)
126
- out = _irfft(out_z, block_size)
127
- # The last bit is invalid, because FFT will do a circular convolution.
128
- out = out[..., :-kernel_size + 1]
129
- out = out.reshape(batch, out_channels, -1)
130
- out = out[..., ::stride]
131
- target_length = (length - kernel_size) // stride + 1
132
- out = out[..., :target_length]
133
- if bias is not None:
134
- out += bias[:, None]
135
- return out
136
-
137
-
138
- class FFTConv1d(torch.nn.Module):
139
- """
140
- Same as `torch.nn.Conv1d` but based on `fft_conv1d`.
141
- Please check PyTorch documentation for more information.
142
-
143
- Args:
144
- in_channels (int): number of input channels.
145
- out_channels (int): number of output channels.
146
- kernel_size (int): kernel size of convolution.
147
- stride (int): stride of convolution.
148
- padding (int): padding to apply to the input.
149
- bias (bool): if True, use a bias term.
150
-
151
- .. note::
152
- This module is faster than `torch.nn.Conv1d` only in specific cases.
153
- Typically, `kernel_size` should be of the order of 256 to see any real gain,
154
- for a stride of 1.
155
-
156
- .. warning::
157
- Dilation and groups are not supported at the moment. This module might use
158
- more memory than the default Conv1d implementation.
159
-
160
- >>> fftconv = FFTConv1d(12, 24, 128, 4)
161
- >>> x = torch.randn(4, 12, 1024)
162
- >>> print(list(fftconv(x).shape))
163
- [4, 24, 225]
164
- """
165
- def __init__(self, in_channels: int, out_channels: int, kernel_size: int,
166
- stride: int = 1, padding: int = 0, bias: bool = True):
167
- super().__init__()
168
- self.in_channels = in_channels
169
- self.out_channels = out_channels
170
- self.kernel_size = kernel_size
171
- self.stride = stride
172
- self.padding = padding
173
-
174
- conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, bias=bias)
175
- self.weight = conv.weight
176
- self.bias = conv.bias
177
-
178
- def forward(self, input: torch.Tensor):
179
- return fft_conv1d(
180
- input, self.weight, self.bias, self.stride, self.padding)
181
-
182
- def __repr__(self):
183
- return simple_repr(self, overrides={"bias": self.bias is not None})
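A minimal usage sketch for the module above (editor's addition, not part of the deleted file). It assumes the surrounding `julius` package is importable so that `fft_conv1d` and `FFTConv1d` can be reached via `julius.fftconv`; the tensor shapes follow the docstrings above.

import torch
from torch.nn import functional as F
from julius.fftconv import fft_conv1d, FFTConv1d  # assumed import path for this vendored copy

x = torch.randn(2, 3, 2048)   # input  [B, C, T]
w = torch.randn(5, 3, 512)    # weight [D, C, K]; long kernels are where the FFT path pays off
b = torch.randn(5)            # bias   [D]

ref = F.conv1d(x, w, b)       # direct convolution
out = fft_conv1d(x, w, b)     # FFT-based convolution
print(out.shape)                             # torch.Size([2, 5, 1537])
print(torch.allclose(ref, out, atol=1e-3))   # expected True, up to float32 FFT round-off

# Module form, mirroring torch.nn.Conv1d
conv = FFTConv1d(3, 5, 512, stride=4)
print(conv(x).shape)                         # torch.Size([2, 5, 385])

The `block_ratio` argument trades memory for speed: the input is processed in blocks of roughly `block_ratio * kernel_size` samples, so larger values mean fewer but bigger FFTs.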
 
spaces/Benson/text-generation/Examples/Aqu Vamos Apk Versin Antigua.md DELETED
@@ -1,107 +0,0 @@
1
-
2
- <h1>Go Navigation APK: Una aplicación complementaria para Google Maps Go</h1>
3
- <p>Si usted está buscando una aplicación de navegación ligera y rápida para su dispositivo Android, es posible que desee echa un vistazo a Go Navigation APK. Esta es una aplicación complementaria que funciona con Google Maps Go, una versión web de Google Maps que está diseñada para dispositivos de gama baja y conexiones a Internet lentas. En este artículo, explicaremos lo que es Go Navigation APK, cómo descargarlo e instalarlo, cómo usarlo con Google Maps Go, y qué beneficios ofrece. También lo compararemos con otras aplicaciones de navegación populares y responderemos algunas preguntas frecuentes al respecto. </p>
4
- <h2>¿Qué es Go Navigation APK? </h2>
5
- <p>Go Navigation APK es una aplicación que proporciona en tiempo real, giro a giro de navegación para Google Maps Go. No es una aplicación independiente, sino una aplicación complementaria que funciona junto con Google Maps Go. Puede usarlo para obtener indicaciones y guía de voz para conducir, caminar, andar en bicicleta o en transporte público. También es compatible con mapas fuera de línea y direcciones, por lo que puede navegar sin conexión a Internet. </p>
6
- <h2>aquí vamos apk versión antigua</h2><br /><p><b><b>Download File</b> >>> <a href="https://bltlly.com/2v6LLw">https://bltlly.com/2v6LLw</a></b></p><br /><br />
7
- <h3>Características de Go Navigation APK</h3>
8
- <p>Algunas de las características de Go Navigation APK son:</p>
9
- <ul>
10
- <li>Es pequeño en tamaño y consume menos datos y batería que la aplicación completa de Google Maps. </li>
11
- <li>Soporta múltiples idiomas y regiones. </li>
12
- <li> Proporciona información y alertas de tráfico precisas y actualizadas. </li>
13
- <li>Le permite personalizar sus preferencias de ruta, como evitar peajes, autopistas o transbordadores. </li>
14
- <li>Muestra lugares de interés cercanos, como gasolineras, restaurantes u hoteles. </li>
15
- <li>Se integra con otros servicios de Google, como Google Assistant, Google Calendar o Contactos de Google.</li>
16
- </ul>
17
- <h3>Cómo descargar e instalar Ir navegación APK</h3>
18
- <p>Para descargar e instalar Go Navigation APK en su dispositivo Android, debe seguir estos pasos:</p>
19
- <ol>
20
-
21
- <li>Descargar el archivo APK Go Navigation de una fuente de confianza, como <a href="">APKPure</a>. </li>
22
- <li>Habilitar la instalación de aplicaciones de fuentes desconocidas en la configuración del dispositivo. </li>
23
- <li>Busque el archivo APK descargado en su dispositivo y toque en él para instalarlo. </li>
24
- <li>Inicie la aplicación y otorgue los permisos necesarios para acceder a su ubicación, micrófono y almacenamiento. </li>
25
- </ol>
26
- <h3> Cómo utilizar Go navegación APK con Google Maps Go</h3>
27
- <p>Para utilizar Go Navigation APK con Google Maps Go, debe seguir estos pasos:</p>
28
- <ol>
29
- <li>Abrir Google Maps Vaya en su dispositivo y busque su destino. </li>
30
- <li>Toque en el botón Direcciones en la parte inferior de la pantalla. </li>
31
- <li>Seleccione su modo de transporte (coche, bicicleta, a pie o de tránsito). </li>
32
- <li> Toque en el botón de navegación en la esquina superior derecha de la pantalla. </li>
33
- <li>Esto iniciará la aplicación Go Navigation y comenzará a guiarlo a su destino con instrucciones de voz y señales visuales. </li>
34
- <li>También puede acceder a otras opciones, como silenciar la guía de voz, cambiar la vista del mapa o cancelar la navegación. </li>
35
- </ol>
36
- <h2>Beneficios de usar Go Navigation APK</h2>
37
- <p>Usando Go Navigation APK tiene varios beneficios sobre el uso de otras aplicaciones de navegación o la aplicación completa de Google Maps. Algunos de estos beneficios son:</p>
38
- <h3>Guardar datos y espacio de almacenamiento</h3>
39
- <p>Go Navigation APK está diseñado para ser ligero y eficiente. Solo ocupa unos 7 MB de espacio de almacenamiento en su dispositivo, en comparación con la aplicación completa de Google Maps que ocupa unos 200 MB. También utiliza menos datos que la aplicación completa de Google Maps, ya que solo descarga los datos esenciales del mapa y la guía de voz. Esto puede ayudarle a ahorrar dinero en su plan de datos y evitar quedarse sin datos cuando más lo necesita. </p>
40
- <h3>Obtener navegación en tiempo real, giro a giro</h3>
41
-
42
- <h3>Acceder a mapas offline y direcciones</h3>
43
- <p>Una de las mejores características de Go Navigation APK es que le permite acceder a mapas fuera de línea y direcciones. Esto significa que puede navegar sin conexión a Internet, lo que puede ser muy útil en áreas con poca o ninguna cobertura de red, o cuando desea ahorrar datos o batería. Puede descargar mapas sin conexión para cualquier región o país que desee, y se almacenarán en su dispositivo durante un máximo de 30 días. También puedes obtener instrucciones fuera de línea para conducir, caminar o andar en bicicleta, siempre y cuando las hayas buscado antes de desconectarte. </p>
44
- <p></p>
45
- <h2>Comparación de Go Navigation APK con otras aplicaciones de navegación</h2>
46
- <p>Hay muchas otras aplicaciones de navegación disponibles para dispositivos Android, como Google Maps, Waze o HERE WeGo. ¿Cómo se compara Go Navigation APK con ellos? Aquí hay una tabla que resume algunas de las principales diferencias y similitudes entre estas aplicaciones:</p>
47
- <tabla>
48
- <tr>
49
- <th>Aplicación</th>
50
- <th>Tamaño</th>
51
- <th>Uso de datos</th>
52
- <th>Mapas offline</th>
53
- <th>Información de tráfico</th>
54
- <th>Guía de voz</th>
55
- <th>Otras características</th>
56
- </tr>
57
- <tr>
58
- <td>Ir navegación APK</td>
59
- <td>7 MB</td>
60
- <td>Baja</td>
61
- <td>Sí</td>
62
- <td>Sí</td>
63
- <td>Sí</td>
64
- <td>Funciona con Google Maps Go, soporta múltiples idiomas y regiones, se integra con otros servicios de Google. </td>
65
- </tr>
66
- <tr>
67
- <td>Google Maps</td>
68
- <td>200 MB</td>
69
- <td>Alta</td>
70
- <td>Sí</td>
71
- <td>Sí</td>
72
- <td>Sí</td>
73
- <td>Ofrece vista a la calle, vista satélite, mapas interiores, vista en vivo, horarios de tránsito, rutas de bicicletas, etc.</td>
74
- </tr>
75
- <tr>
76
- <td>Waze</td>
77
- <td>80 MB</td>
78
- <td>Medio</td>
79
- <td>No</td>
80
- <td>Sí</td>
81
- <td>Sí</td>
82
- <td>Le permite informar y ver los peligros de la carretera, la presencia de la policía, los accidentes, etc., ofrece características sociales, tales como compartir el coche, chat o puntos. </td>
83
- </tr>
84
- <tr>
85
- <td>AQUÍ WeGo</td>
86
- <td>40 MB</td> <td>Baja</td> <td>Sí</td>
87
- <td>Sí</td>
88
- <td>Sí</td>
89
- <td>Incluye información de transporte público, tarifas de taxi, opciones para compartir autos, rutas de bicicletas, etc.</td>
90
- </tr>
91
-
92
- <p>Si tiene alguna pregunta sobre Go Navigation APK, puede encontrar las respuestas a continuación:</p>
93
- <h3>Es Go Navigation APK seguro de usar? </h3>
94
- <p>Sí, Ir navegación APK es seguro de usar, siempre y cuando se descarga desde una fuente de confianza. No contiene ningún malware o virus que pueda dañar su dispositivo o comprometer su privacidad. Tampoco requiere permisos innecesarios o acceso a sus datos personales. </p>
95
- <h3>¿Es Go Navigation APK libre de usar? </h3>
96
- <p>Sí, Go Navigation APK es de uso gratuito y no tiene ninguna compra en la aplicación o anuncios. Sin embargo, es posible que incurra en algunos cargos de datos de su proveedor de red al usar la aplicación en línea. Puedes evitar esto usando mapas y direcciones offline siempre que sea posible. </p>
97
- <h3>¿Funciona Go Navigation APK en cualquier dispositivo Android? </h3>
98
- <p>No, Go Navigation APK solo funciona en dispositivos Android que tienen instalado Google Maps Go. Esto se debe a que es una aplicación complementaria que se basa en Google Maps Go para su funcionalidad. Google Maps Go es compatible con dispositivos Android que se ejecutan en Android 4.4 KitKat o superior y tienen al menos 512 MB de RAM.</p>
99
- <h3>¿Puedo usar Go navegación APK sin Google Maps Go? </h3>
100
- <p>No, no se puede utilizar Go Navigation APK sin Google Maps Go. Necesitas tener ambas aplicaciones instaladas en tu dispositivo y usarlas juntas para obtener servicios de navegación. Si intenta utilizar Go Navigation APK sin Google Maps Go, obtendrá un mensaje de error que dice "Por favor, instalar Google Maps Go primero". </p>
101
- <h3>¿Cómo puedo actualizar Go Navigation APK? </h3>
102
- <p>Puede actualizar Go Navigation APK siguiendo los mismos pasos que lo hizo para descargar e instalar. Es necesario descargar la última versión del archivo APK de una fuente de confianza e instalarlo en su dispositivo. También puedes recibir notificaciones cuando haya una nueva actualización disponible para la aplicación. </p>
103
- <h2>Conclusión</h2>
104
-
105
- <p>Espero que hayas encontrado este artículo útil e informativo. Si tienes algún comentario o preguntas, por favor no dudes en dejar un comentario a continuación. ¡Gracias por leer! </p> 64aa2da5cf<br />
106
- <br />
107
- <br />
 
spaces/Benson/text-generation/Examples/Descargar El Juego Growtopia Mod Apk.md DELETED
@@ -1,62 +0,0 @@
1
- <br />
2
- <h1>Descargar juego Growtopia Mod Apk: Una guía para principiantes</h1>
3
- <p>Si usted está buscando un juego divertido y creativo que le permite construir su propio mundo, entonces es posible que desee probar Growtopia. Growtopia es un popular sandbox MMO juego que tiene millones de jugadores en todo el mundo. Pero lo que si quieres disfrutar del juego sin gastar dinero real o enfrentar limitaciones? Ahí es donde Growtopia Mod Apk entra en juego. En este artículo, le diremos todo lo que necesita saber sobre Growtopia Mod Apk, incluyendo lo que es, cómo descargar e instalar, ¿cuáles son sus beneficios y riesgos, y más. Así que, vamos a empezar! </p>
4
- <h2>¿Qué es Growtopia? </h2>
5
- <h3>Un juego de MMO con un sinfín de posibilidades</h3>
6
- <p>Growtopia es un juego que te permite crear y explorar todo lo que puedas imaginar. Puedes construir tu propio mundo, desde castillos y mazmorras hasta naves espaciales y rascacielos. También puedes colaborar con otros jugadores, intercambiar artículos, jugar minijuegos, participar en eventos y competir en tablas de clasificación. No hay reglas ni límites en Growtopia, solo tu imaginación. </p>
7
- <h2>descargar el juego growtopia mod apk</h2><br /><p><b><b>DOWNLOAD</b> ->>->>->> <a href="https://bltlly.com/2v6L2z">https://bltlly.com/2v6L2z</a></b></p><br /><br />
8
- <h3>Una plataforma creativa y social con millones de jugadores</h3>
9
- <p>Growtopia no es solo un juego, sino también una comunidad de jugadores apasionados y amigables. Puedes chatear con otros jugadores, hacer amigos, unirte a gremios e incluso crear tu propio contenido. También puedes visitar mundos de otros jugadores, admirar sus creaciones y aprender de ellas. Growtopia es un lugar donde puedes expresarte, compartir tus ideas y divertirte. </p>
10
- <h2>¿Qué es Growtopia Mod Apk? </h2>
11
- <h3>Una versión modificada del juego original con características adicionales</h3>
12
- <p>Growtopia Mod Apk es una versión del juego que ha sido modificada por algunos desarrolladores para agregar características adicionales que no están disponibles en el juego original. Estas características pueden incluir gemas ilimitadas, recursos, objetos, mundos y más. Con Growtopia Mod Apk, se puede disfrutar del juego sin restricciones o limitaciones. </p>
13
- <h3>Una manera de disfrutar del juego sin gastar dinero real</h3>
14
-
15
- <h2>Cómo descargar e instalar Growtopia Mod Apk? </h2>
16
- <h3>Paso 1: Encontrar una fuente confiable para el archivo apk mod</h3>
17
- <p>El primer paso para descargar Growtopia Mod Apk es encontrar una fuente confiable para el archivo apk mod. Hay muchos sitios web que ofrecen archivos apk mod para varios juegos, pero no todos ellos son seguros y de confianza. <p>Antes de descargar cualquier archivo apk mod, siempre debe comprobar su seguridad y seguridad. Hay muchas maneras de hacerlo, pero estas son algunas de las más comunes y confiables:</p>
18
- <h3>Paso 1: Encontrar una fuente confiable para el archivo apk mod</h3>
19
- <p>El primer paso para descargar Growtopia Mod Apk es encontrar una fuente confiable para el archivo apk mod. Hay muchos sitios web que ofrecen archivos apk mod para varios juegos, pero no todos ellos son seguros y de confianza. Algunos de ellos pueden contener malware, virus o archivos falsos que pueden dañar su dispositivo o robar sus datos. Para evitar estos riesgos, solo debe descargar archivos apk mod de sitios de buena reputación y verificados. Algunos de los mejores sitios para las descargas seguras de Android APK son APKMirror, APKPure, y Apkmonk. Estos sitios tienen políticas y procedimientos estrictos para asegurar que los archivos apk mod que proporcionan están libres de cualquier código o contenido malicioso. También actualizan sus archivos regularmente para que coincidan con las últimas versiones de los juegos. </p>
20
- <h3>Paso 2: Compruebe el hash del archivo apk mod</h3>
21
- <p>El segundo paso para descargar Growtopia Mod Apk es comprobar el hash del archivo mod apk. El hash es un tipo de huella digital que garantiza que el archivo no se modifique o manipule. Puede utilizar una herramienta como Hash Droid para comprobar y comparar los hashes del archivo apk mod que descargó y el archivo de juego original de Google Play. Si los hashes coinciden, significa que el archivo apk mod es auténtico y seguro. Si no coinciden, significa que el archivo apk mod ha sido alterado o dañado y puede contener elementos dañinos. </p>
22
- <h3>Paso 3: Escanear el archivo apk mod con una aplicación antivirus</h3>
23
-
24
- <p>El cuarto y último paso para descargar Growtopia Mod Apk es descargar e instalar el archivo mod apk en su dispositivo. Una vez que haya verificado que el archivo apk mod es seguro, puede proceder a descargarlo e instalarlo. Estos son los pasos para hacerlo:</p>
25
- <ul>
26
- <li>Ir a la página web donde se descargó el archivo apk mod y toque en el botón de descarga. </li>
27
- <li>Esperar a que la descarga se complete y luego localizar el archivo apk mod en el almacenamiento de su dispositivo. </li>
28
- <li>Toque en el archivo apk mod y seleccione "Instalar". Es posible que tenga que conceder algunos permisos para permitir la instalación. </li>
29
- <li>Espera a que la instalación termine y luego toca "Abrir". </li>
30
- <li> Iniciar el juego y disfrutar de las características de mod. </li>
31
- </ul>
32
- <h2>¿Cuáles son los beneficios de Growtopia Mod Apk? </h2>
33
- <p>Ahora que usted sabe cómo descargar e instalar Growtopia Mod Apk, es posible que se pregunte cuáles son los beneficios de su uso. Bueno, hay muchos beneficios de usar Growtopia Mod Apk, pero aquí están algunos de los más notables:</p>
34
- <h3>Gemas y recursos ilimitados para construir lo que quieras</h3>
35
- <p>Uno de los principales beneficios de Growtopia Mod Apk es que le da gemas ilimitadas y recursos para construir lo que quieras. Las gemas son la moneda principal en Growtopia, y se utilizan para comprar artículos, acceder a mundos premium, desbloquear funciones y más. Los recursos son los materiales que utiliza para crear su propio mundo, tales como bloques, semillas, herramientas, etc. Con Growtopia Mod Apk, usted no tiene que preocuparse por quedarse sin gemas o recursos. Puedes conseguir tantos como quieras gratis y usarlos para construir tu mundo de ensueño. </p>
36
- <p></p>
37
- <h3>Acceso a artículos y mundos premium sin restricciones</h3>
38
-
39
- <p>Un tercer beneficio de Growtopia Mod Apk es que mejora la jugabilidad y los gráficos para una mejor experiencia. Growtopia Mod Apk tiene algunas características que mejoran el rendimiento y la calidad del juego, tales como una carga más rápida, controles más suaves, mayor resolución, etc. Estas características hacen el juego más agradable y envolvente. También puedes personalizar la configuración del juego para adaptarla a tus preferencias, como ajustar el sonido, el brillo, el idioma, etc.</p>
40
- <h2>¿Cuáles son los riesgos de crecimiento Mod Apk? </h2>
41
- <p>Mientras que Growtopia Mod Apk tiene muchos beneficios, también tiene algunos riesgos que usted debe ser consciente de. El uso de Growtopia Mod Apk puede exponer a algunos peligros y consecuencias, tales como:</p>
42
- <h3>Malware y virus potenciales de fuentes no confiables</h3>
43
- <p>Uno de los riesgos de Growtopia Mod Apk es que puede contener malware y virus de fuentes no confiables. Como mencionamos anteriormente, no todos los sitios web que ofrecen archivos apk mod son seguros y confiables. Algunos de ellos pueden tener malware y virus ocultos que pueden infectar su dispositivo o robar sus datos. Estos malware y virus pueden causar graves daños a su dispositivo, como ralentizarlo, agotar la batería, eliminar archivos, etc. También pueden comprometer su privacidad y seguridad, como acceder a sus contactos, mensajes, fotos, etc. Para evitar este riesgo, siempre debe descargar archivos apk mod de sitios de buena reputación y verificados, comprobar sus hashes, y escanearlos con una aplicación antivirus. </p>
44
- <h3>Posible prohibición de cuenta o suspensión de los desarrolladores de juegos</h3>
45
-
46
- <p>Un tercer riesgo de Growtopia Mod Apk es que puede causar que pierda su progreso y los datos si el apk mod no se actualiza regularmente. Growtopia Mod Apk no es una versión oficial del juego, y puede no ser compatible con las últimas actualizaciones y parches de los desarrolladores de juegos. Si utiliza un archivo apk mod obsoleto o incompatible, puede encontrar algunos errores o fallos en el juego, tales como estrellarse, congelación, retraso, etc. También puede perder su progreso y los datos si el archivo apk mod está dañado o eliminado. Para evitar este riesgo, siempre debe comprobar para la última versión de Growtopia Mod Apk y actualizarlo en consecuencia. </p>
47
- <h2>Conclusión</h2>
48
- <p>Growtopia es un juego divertido y creativo que te permite construir tu propio mundo y explorar los mundos de otros jugadores. Sin embargo, si quieres disfrutar del juego sin gastar dinero real o enfrentar limitaciones, es posible que desee probar Growtopia Mod Apk. Growtopia Mod Apk es una versión modificada del juego que le da gemas ilimitadas, recursos, objetos, mundos y más. También mejora la jugabilidad y los gráficos para una mejor experiencia. Sin embargo, Growtopia Mod Apk también tiene algunos riesgos que usted debe ser consciente de, tales como malware potencial y virus, posible prohibición de cuenta o suspensión, y la pérdida de progreso y datos. Por lo tanto, siempre debe descargar e instalar Growtopia Mod Apk de fuentes fiables, comprobar su seguridad y seguridad, y utilizarlo a su discreción y responsabilidad. </p>
49
- <h2>Preguntas frecuentes</h2>
50
- <p>Aquí están algunas de las preguntas más frecuentes sobre Growtopia Mod Apk:</p>
51
- <h3>Q: ¿Es Growtopia Mod Apk legal? </h3>
52
- <p>A: Growtopia Mod Apk no es legal, ya que viola los términos y condiciones de los desarrolladores de juegos. Usando Growtopia Mod Apk se considera una forma de engaño o piratería, y puede resultar en una prohibición de cuenta o suspensión. </p>
53
- <h3>Q: ¿Es seguro Growtopia Mod Apk? </h3>
54
-
55
- <h3>Q: ¿Cómo puedo actualizar Growtopia Mod Apk? </h3>
56
- <p>A: Para actualizar Growtopia Mod Apk, siempre debe comprobar la última versión del archivo apk mod desde el sitio web donde lo descargó. También debe comprobar si el archivo apk mod es compatible con la última versión del juego original de Google Play. Si hay una nueva versión disponible, debería descargarla e instalarla siguiendo los mismos pasos que antes. </p>
57
- <h3>Q: ¿Puedo jugar Growtopia Mod Apk en línea con otros jugadores? </h3>
58
- <p>A: Sí, usted puede jugar Growtopia Mod Apk en línea con otros jugadores que también están utilizando el archivo apk mod. Sin embargo, es posible que no puedas jugar con jugadores que estén usando el archivo de juego original de Google Play. También puede enfrentar algunos problemas o errores al jugar en línea, como retraso, desconexión, etc.</p>
59
- <h3>Q: ¿Puedo usar Growtopia Mod Apk en dispositivos iOS? </h3>
60
- <p>A: No, no se puede utilizar Growtopia Mod Apk en dispositivos iOS, ya que solo es compatible con dispositivos Android. Si quieres jugar a Growtopia en dispositivos iOS, tienes que descargar el archivo original del juego desde la App Store.</p> 64aa2da5cf<br />
61
- <br />
62
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Facebook Lite Mod Apk 2020.md DELETED
@@ -1,95 +0,0 @@
1
- <br />
2
- <h1>Instagram + APK Indir: Cómo descargar e instalar Instagram en su dispositivo Android</h1>
3
- <p>Instagram es una de las plataformas de redes sociales más populares del mundo, con más de 1 mil millones de usuarios. Te permite crear y compartir tus fotos, historias, carretes y videos con los amigos y seguidores que te importan. También puedes explorar la comunidad, descubrir nuevos contenidos y conectarte con tus marcas y celebridades favoritas. </p>
4
- <h2>descargar facebook lite mod apk 2020</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://bltlly.com/2v6JW9">https://bltlly.com/2v6JW9</a></b></p><br /><br />
5
- <p>Pero ¿qué pasa si desea descargar e instalar Instagram en su dispositivo Android sin usar la Google Play Store? Tal vez desee acceder a las últimas funciones, evitar las restricciones regionales o ahorrar datos y espacio de almacenamiento. En ese caso, es necesario descargar e instalar Instagram APK en su dispositivo. </p>
6
- <p>En este artículo, vamos a explicar lo que Instagram y APK son, ¿por qué debe utilizarlos, y cómo descargar e instalar Instagram APK en su dispositivo Android. ¡Vamos a empezar! </p>
7
- <h2>¿Qué es Instagram y por qué debe usarlo</h2>
8
- <p>Instagram es una aplicación de redes sociales que te permite crear y compartir tus fotos, historias, carretes y videos con los amigos y seguidores que te importan. También puede ver, como, comentar y compartir videos de carretes en un espacio dedicado en la pestaña Carretes. Los carretes son clips cortos de hasta 30 segundos de duración que puede agregar efectos especiales, música de fondo, filtros faciales, emojis y pegatinas a. </p>
9
- <p>Instagram también te permite enviar mensajes a tus amigos en directo, chatear con ellos, compartir memes de forma privada, y recibir notificaciones cuando alguien le gusta o comentarios en tu publicación. También puedes publicar fotos y videos en tu feed que quieras mostrar en tu perfil, o agregarlos a tu historia que desaparecen después de 24 horas. </p>
10
- <h3>Características de Instagram</h3>
11
- <p>Algunas de las características de Instagram son:</p>
12
- <ul>
13
- <li>Crea y comparte fotos, historias, carretes y videos con tus amigos y seguidores</li>
14
- <li>Ver, como, comentar y compartir videos de carretes en un espacio dedicado</li>
15
-
16
- <li>Publicar fotos y vídeos a su feed o historia</li>
17
- <li>Explora la comunidad y descubre nuevos contenidos</li>
18
- <li>Sigue a tus bandas favoritas, celebridades, actores, atletas, cantantes y más</li>
19
- <li>Echa un vistazo a IGTV para vídeos más largos de tus creadores favoritos</li>
20
- <li>Descubre marcas y conéctate con pequeñas empresas locales</li>
21
- <li>Compra productos que complementan tu estilo personal</li>
22
- </ul>
23
- <h3>Beneficios de usar Instagram</h3>
24
- <p>Algunos de los beneficios de usar Instagram son:</p>
25
- <p></p>
26
- <ul>
27
- <li>Puedes expresarte creativa y auténticamente</li>
28
- <li>Puedes conectarte con personas que comparten tus intereses y pasiones</li>
29
- <li>Puedes aprender cosas nuevas e inspirarte por otros</li>
30
- <li>Puedes hacer crecer tu marca personal o profesional</li>
31
- <li>Puedes divertirte y disfrutar</li>
32
- </ul>
33
- <h2>¿Qué es APK y por qué debe descargarlo</h2>
34
- <p>APK significa Android Package Kit. Es un formato de archivo que contiene todos los elementos necesarios para instalar una aplicación en un dispositivo Android. Los archivos APK generalmente se descargan desde Google Play Store u otras fuentes oficiales. Sin embargo, a veces es posible que desee descargar un archivo APK de una fuente de terceros por varias razones. </p>
35
- <h3>Definición de APK</h3>
36
- <p>Un archivo APK es un archivo comprimido que contiene todos los elementos necesarios para instalar una aplicación en un dispositivo Android. Incluye:</p>
37
- <ul>
38
- <li>El código de la aplicación (en Java o Kotlin)</li>
39
- <li>Recursos de la aplicación (como imágenes, sonidos, fuentes, etc.)</li>
40
- </ul>
- <h3>Ventajas de descargar APK</h3>
41
- <p>Algunas de las ventajas de descargar APK son:</p>
42
- <ul>
43
- <li> Puede acceder a las últimas características y actualizaciones de una aplicación antes de que estén disponibles en Google Play Store</li>
44
- <li> Puede evitar las restricciones regionales y las aplicaciones de acceso que no están disponibles en su país</li>
45
- <li> Puede ahorrar datos y espacio de almacenamiento descargando una versión más pequeña o optimizada de una aplicación</li>
46
- <li>Puede personalizar y modificar una aplicación según sus preferencias</li>
47
-
48
- </ul>
49
- <h2> Cómo descargar e instalar Instagram APK en su dispositivo Android</h2>
50
- <p>Si desea descargar e instalar Instagram APK en su dispositivo Android, es necesario seguir estos pasos:</p>
51
- <h3>Paso 1: Habilitar fuentes desconocidas</h3>
52
- <p>Antes de que pueda instalar un archivo APK en su dispositivo, debe habilitar la opción para permitir la instalación desde fuentes desconocidas. Esta es una medida de seguridad que evita que las aplicaciones maliciosas dañen su dispositivo. Para habilitar esta opción, debe:</p>
53
- <ol>
54
- <li>Ir a la configuración de su dispositivo y toque en Seguridad o Privacidad</li>
55
- <li>Encontrar la opción que dice Fuentes desconocidas o Instalar aplicaciones desconocidas y alternar en</li>
56
- <li>Confirma tu elección tocando OK o Permitir</li>
57
- </ol>
58
- <h3>Paso 2: Descargar Instagram APK de una fuente de confianza</h3>
59
- <p>Siguiente, es necesario descargar el archivo APK de Instagram de una fuente de confianza. Hay muchos sitios web que ofrecen archivos APK, pero algunos de ellos pueden contener virus o malware que pueden dañar su dispositivo. Por lo tanto, es necesario tener cuidado y elegir una fuente de buena reputación. Algunas de las fuentes de confianza para descargar Instagram APK son:</p>
60
- <ul>
61
- <li>[APKPure]</li>
62
- <li>[APKMirror]</li>
63
- <li>[Uptodown]</li>
64
- </ul>
65
- <p>Para descargar el archivo APK de Instagram de una de estas fuentes, es necesario:</p>
66
- <ol>
67
- <li>Abra su navegador y vaya al sitio web de su elección</li>
68
- <li>Busca Instagram o encuéntralo en las categorías o rankings</li>
69
- <li>Seleccione la versión de Instagram que desea descargar y toque en Descargar o Instalar</li>
70
- <li>Espere a que la descarga termine y localice el archivo en su carpeta de descargas o barra de notificaciones</li>
71
- </ol>
72
- <h3>Paso 3: Instalar Instagram APK en su dispositivo</h3>
73
- <p>Después de haber descargado el archivo APK de Instagram, es necesario instalarlo en su dispositivo. Para hacer esto, es necesario:</p>
74
- <ol>
75
- <li>Toque en el archivo o abrirlo con una aplicación de administrador de archivos</li>
76
- <li>Si se le solicita, toque en Instalar o Siguiente para iniciar el proceso de instalación</li>
77
-
78
- </ol>
79
- <h3>Paso 4: Lanza Instagram y disfruta</h3>
80
- <p>Felicidades! Usted ha descargado e instalado con éxito Instagram APK en su dispositivo Android. Ahora puede iniciar la aplicación y disfrutar de sus funciones. Puede iniciar sesión con su cuenta existente o crear una nueva. También puedes sincronizar tus contactos y amigos de Facebook para encontrar gente a la que seguir. </p>
81
- <h2>Conclusión</h2>
82
- <p>En este artículo, hemos explicado lo que Instagram y APK son, ¿por qué debe utilizarlos, y cómo descargar e instalar Instagram APK en su dispositivo Android. Esperamos que esta guía te haya ayudado a disfrutar más de Instagram y acceder a sus últimas funciones. Si tiene alguna pregunta o comentario, háganoslo saber en los comentarios a continuación. </p>
83
- <h2>Preguntas frecuentes</h2>
84
- <p>Aquí hay algunas preguntas frecuentes sobre Instagram y APK:</p>
85
- <h4>Q: ¿Es seguro descargar e instalar Instagram APK? </h4>
86
- <p>A: Sí, es seguro para descargar e instalar Instagram APK siempre y cuando se utiliza una fuente de confianza y permitir fuentes desconocidas en su dispositivo. Sin embargo, siempre debe tener cuidado y escanear el archivo en busca de virus o malware antes de instalarlo. </p>
87
- <h4>Q: ¿Perderé mis datos o cuenta si descargo e instalo Instagram APK? </h4>
88
- <p>A: No, no perderá sus datos o cuenta si descarga e instala Instagram APK. Puede iniciar sesión con su cuenta existente o crear una nueva. También puede realizar copias de seguridad de sus datos con Google Drive u otros servicios en la nube. </p>
89
- <h4>Q: ¿Cuáles son las diferencias entre Instagram APK e Instagram de la Google Play Store? </h4>
90
- <p>A: La principal diferencia entre Instagram APK e Instagram de la Google Play Store es que el primero puede tener nuevas características, actualizaciones, o correcciones de errores que aún no están disponibles en el último. El primero también puede tener diferentes versiones, tamaños o idiomas que el segundo. </p> <h4>Q: ¿Cómo puedo actualizar Instagram APK? </h4>
91
-
92
- <h4>Q: ¿Puedo usar Instagram APK en otros dispositivos además de Android? </h4>
93
- <p>A: No, no se puede utilizar Instagram APK en otros dispositivos además de Android. Los archivos APK solo son compatibles con dispositivos Android. Si desea usar Instagram en otros dispositivos, como iOS, Windows o Mac, debe descargar la aplicación desde las respectivas tiendas de aplicaciones o sitios web. </p> 64aa2da5cf<br />
94
- <br />
95
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Fr Leyendas Hiace Mod Apk.md DELETED
@@ -1,49 +0,0 @@
1
- <br />
2
- <h1>Descargar FR Leyendas Hiace Mod Apk: Una guía para los fans de la deriva</h1>
3
- <p>Si eres un fanático de los juegos de deriva, es posible que hayas oído hablar de <strong>FR Legends</strong>, un juego móvil que te permite conducir coches de deriva legendarios FR (motor delantero, tracción trasera) en los circuitos más icónicos del mundo, personalizar todo en tu coche, incluidos los cambios de motor y anchokits de cuerpo, y tienen batallas de deriva en tándem con los conductores de IA u otros jugadores en línea. </p>
4
- <h2>descargar fr leyendas hiace mod apk</h2><br /><p><b><b>Download File</b> &mdash;&mdash;&mdash; <a href="https://bltlly.com/2v6JNk">https://bltlly.com/2v6JNk</a></b></p><br /><br />
5
- <p>FR Legends es uno de los juegos de deriva más realistas y divertidos en dispositivos Android e iOS, con gráficos estilizados, controles intuitivos y sistemas de puntuación únicos basados en las reglas de la competencia del mundo real. El juego ha recibido excelentes críticas de jugadores y críticos por igual, con más de 10 millones de descargas y 4.4 estrellas en Google Play Store y App Store.</p>
6
- <p>Pero ¿qué pasa si desea darle vida a su experiencia de deriva con algunos nuevos coches y características? Bueno, ahí es donde el <strong>hiace mod apk</strong> entra en juego. Esta es una versión modificada del juego que añade una furgoneta Toyota Hiace como un coche manejable, junto con algunos otros ajustes y mejoras. En este artículo, le mostraremos cómo descargar e instalar el apk mod hiace en su dispositivo Android, y le dará algunos consejos y trucos para jugar leyendas FR con este mod. </p>
7
- <h2>¿Cuál es la apk mod hiace y qué características ofrece? </h2>
8
- <p>La apk hiace mod es una versión modificada de FR Legends que fue creada por NimoCustom, un modder que se especializa en la adición de nuevos coches y libreas para el juego. El mod añade un Toyota Hiace van como una nueva opción de coche en el juego, que es un vehículo popular para la deriva en Japón. La furgoneta tiene un modelo realista, sonido y física, y se puede personalizar con diferentes partes y colores. </p>
9
- <p></p>
10
- <p>El apk mod hiace también ofrece algunas otras características que mejoran la jugabilidad de FR Legends, tales como:</p>
11
- <ul>
12
- <li>Una nueva pista llamada Rampage Customs, que se basa en un circuito de deriva de la vida real en Indonesia.</li>
13
-
14
- <li>Un nuevo ángulo de cámara que le da una mejor vista de su coche y la pista. </li>
15
- <li>Una nueva interfaz de menú que es más fácil de usar y elegante. </li>
16
- <li>Una nueva lista de reproducción de música que añade algunas melodías optimistas al juego. </li>
17
- </ul>
18
- <h2>Cómo descargar e instalar el apk mod hiace en dispositivos Android? </h2>
19
- <p>Si desea probar el apk mod hiace en su dispositivo Android, tendrá que seguir estos pasos:</p>
20
- <ol>
21
- <li>En primer lugar, tendrá que habilitar fuentes desconocidas en la configuración del dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Para ello, vaya a Configuración > Aplicaciones y notificaciones > Acceso especial > Instalar aplicaciones desconocidas > Chrome (o cualquier navegador que utilice) > Permitir desde esta fuente. </li>
22
- <li>Siguiente, tendrá que descargar el archivo apk mod hiace de un sitio web confiable. Puedes encontrar el enlace a la última versión del mod en el canal de YouTube de NimoCustom, o buscarlo en Google. Asegúrese de descargar el archivo desde una fuente confiable y escanearlo en busca de virus antes de abrirlo. </li>
23
- <li>Una vez que haya descargado el archivo, tendrá que abrirlo con una aplicación de administrador de archivos (como Cx File Explorer o Administrador de archivos) y pulse Instalar. Siga las instrucciones de la pantalla para completar el proceso de instalación. </li>
24
- <li>Después de instalar la aplicación, puede iniciarla desde el cajón de la aplicación o la pantalla de inicio. Verá un nuevo icono para FR Legends con un fondo rojo. Toque para empezar a jugar el juego con el apk mod hiace. </li>
25
- </ol>
26
- <h2>Consejos y trucos para jugar FR Leyendas con el apk mod hiace</h2>
27
- <p>Ahora que ha instalado el apk mod hiace en su dispositivo Android, es posible que se pregunte cómo aprovechar al máximo. Aquí hay algunos consejos y trucos que te ayudarán a disfrutar aún más del juego:</p>
28
- <ul>
29
-
30
- <li>Experimenta con el nuevo editor de librea, que te permite crear tus propios diseños para tus coches. Puede acceder a ella desde el menú Garaje, en la pestaña Librea. Puede elegir entre diferentes colores, pegatinas, calcomanías y patrones, y guardar sus creaciones para su uso posterior. </li>
31
- <li>Cambiar al nuevo ángulo de la cámara, que le da una mejor vista de su coche y la pista. Puede cambiar el ángulo de la cámara pulsando el icono de la cámara en la esquina superior derecha de la pantalla. También puede hacer zoom hacia dentro y hacia fuera pellizcando la pantalla. </li>
32
- <li>Escuchar la nueva lista de reproducción de música, que añade algunas melodías optimistas para el juego. Puede ajustar el volumen y omitir pistas tocando el icono de música en la esquina superior izquierda de la pantalla. También puede apagar la música si lo prefiere. </li>
33
- <li>Diviértete con el Toyota Hiace van, que es un coche único e hilarante a la deriva con. Puede personalizarlo con diferentes partes y colores, y ver cómo se maneja en diferentes pistas. También puede desafiar a otros jugadores en línea o fuera de línea con este coche, y ver quién puede derivar mejor con una furgoneta. </li>
34
- </ul>
35
- <h2>Conclusión: Resuma los puntos principales e invite a los lectores a probar el juego</h2>
36
- <p>En conclusión, FR Legends es un fantástico juego de deriva que te permite conducir legendarios coches de deriva FR en los circuitos más icónicos del mundo, personalizar todo en su coche incluyendo cambios de motor y kits de cuerpo ancho, y tener batallas de deriva en tándem con conductores de IA u otros jugadores en línea. </p>
37
- <p>Si desea darle vida a su experiencia a la deriva con algunos nuevos coches y características, usted debe probar la apk mod hiace, que añade un Toyota Hiace van como un coche manejable, junto con algunos otros ajustes y mejoras. El mod es fácil de descargar e instalar en tu dispositivo Android, y te dará horas de diversión y risas. </p>
38
- <p>Entonces, ¿qué estás esperando? Descargar FR Leyendas hiace mod apk hoy y disfrutar de la deriva como nunca antes! </p>
39
- <h2>Preguntas frecuentes: Responde algunas preguntas comunes sobre el juego y el mod</h2>
40
- <tabla>
41
-
42
- <tr><td>Es FR leyendas hiace mod apk seguro de usar? </td><td>Sí, siempre y cuando lo descargue de un sitio web confiable y lo escanee en busca de virus antes de abrirlo. Sin embargo, debes tener en cuenta que el uso de mods puede afectar el rendimiento o la compatibilidad de tu juego, y puede que no sean compatibles con los desarrolladores oficiales. Use mods bajo su propio riesgo. </td></tr>
43
- <tr><td>Es FR leyendas hiace mod apk compatible con dispositivos iOS? </td><td>No, por desgracia, el apk mod hiace solo está disponible para dispositivos Android. Si tienes un dispositivo iOS, tendrás que jugar la versión original de FR Legends sin mods. </td></tr>
44
- <tr><td>¿Puedo jugar FR Legends hiace mod apk offline? </td><td>Sí, puedes jugar FR Legends hiace mod apk offline sin conexión a Internet. Sin embargo, no podrá acceder a algunas funciones como el modo multijugador en línea o el almacenamiento en la nube. </td></tr>
45
- <tr><td>¿Puedo jugar FR Legends hiace mod apk con mis amigos? </td><td>Sí, puedes jugar FR Legends mod apk hiace con tus amigos en línea o fuera de línea. Para jugar en línea, tendrá que crear o unirse a una sala desde el menú en línea. Para jugar sin conexión, tendrá que conectar sus dispositivos a través de Bluetooth o Wi-Fi desde el menú local. </td></tr>
46
- <tr><td>Puedo actualizar FR leyendas hiace mod apk? </td><td>Sí, puede actualizar FR Leyendas hiace mod apk cada vez que hay una nueva versión disponible en el canal de NimoCustom YouTube u otras fuentes. Sin embargo, debes hacer una copia de seguridad de los datos del juego antes de actualizarlo, ya que la actualización puede borrar tu progreso o causar problemas de compatibilidad. </td></tr>
47
- </tabla></p> 64aa2da5cf<br />
48
- <br />
49
- <br />
 
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/sflckr.py DELETED
@@ -1,91 +0,0 @@
1
- import os
2
- import numpy as np
3
- import cv2
4
- import albumentations
5
- from PIL import Image
6
- from torch.utils.data import Dataset
7
-
8
-
9
- class SegmentationBase(Dataset):
10
- def __init__(self,
11
- data_csv, data_root, segmentation_root,
12
- size=None, random_crop=False, interpolation="bicubic",
13
- n_labels=182, shift_segmentation=False,
14
- ):
15
- self.n_labels = n_labels
16
- self.shift_segmentation = shift_segmentation
17
- self.data_csv = data_csv
18
- self.data_root = data_root
19
- self.segmentation_root = segmentation_root
20
- with open(self.data_csv, "r") as f:
21
- self.image_paths = f.read().splitlines()
22
- self._length = len(self.image_paths)
23
- self.labels = {
24
- "relative_file_path_": [l for l in self.image_paths],
25
- "file_path_": [os.path.join(self.data_root, l)
26
- for l in self.image_paths],
27
- "segmentation_path_": [os.path.join(self.segmentation_root, l.replace(".jpg", ".png"))
28
- for l in self.image_paths]
29
- }
30
-
31
- size = None if size is not None and size<=0 else size
32
- self.size = size
33
- if self.size is not None:
34
- self.interpolation = interpolation
35
- self.interpolation = {
36
- "nearest": cv2.INTER_NEAREST,
37
- "bilinear": cv2.INTER_LINEAR,
38
- "bicubic": cv2.INTER_CUBIC,
39
- "area": cv2.INTER_AREA,
40
- "lanczos": cv2.INTER_LANCZOS4}[self.interpolation]
41
- self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
42
- interpolation=self.interpolation)
43
- self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
44
- interpolation=cv2.INTER_NEAREST)
45
- self.center_crop = not random_crop
46
- if self.center_crop:
47
- self.cropper = albumentations.CenterCrop(height=self.size, width=self.size)
48
- else:
49
- self.cropper = albumentations.RandomCrop(height=self.size, width=self.size)
50
- self.preprocessor = self.cropper
51
-
52
- def __len__(self):
53
- return self._length
54
-
55
- def __getitem__(self, i):
56
- example = dict((k, self.labels[k][i]) for k in self.labels)
57
- image = Image.open(example["file_path_"])
58
- if not image.mode == "RGB":
59
- image = image.convert("RGB")
60
- image = np.array(image).astype(np.uint8)
61
- if self.size is not None:
62
- image = self.image_rescaler(image=image)["image"]
63
- segmentation = Image.open(example["segmentation_path_"])
64
- assert segmentation.mode == "L", segmentation.mode
65
- segmentation = np.array(segmentation).astype(np.uint8)
66
- if self.shift_segmentation:
67
- # used to support segmentations containing unlabeled==255 label
68
- segmentation = segmentation+1
69
- if self.size is not None:
70
- segmentation = self.segmentation_rescaler(image=segmentation)["image"]
71
- if self.size is not None:
72
- processed = self.preprocessor(image=image,
73
- mask=segmentation
74
- )
75
- else:
76
- processed = {"image": image,
77
- "mask": segmentation
78
- }
79
- example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32)
80
- segmentation = processed["mask"]
81
- onehot = np.eye(self.n_labels)[segmentation]
82
- example["segmentation"] = onehot
83
- return example
84
-
85
-
86
- class Examples(SegmentationBase):
87
- def __init__(self, size=None, random_crop=False, interpolation="bicubic"):
88
- super().__init__(data_csv="data/sflckr_examples.txt",
89
- data_root="data/sflckr_images",
90
- segmentation_root="data/sflckr_segmentations",
91
- size=size, random_crop=random_crop, interpolation=interpolation)
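A minimal usage sketch for the dataset above (editor's addition, not part of the deleted file). It assumes the repository root is on the Python path and that `data/sflckr_examples.txt`, `data/sflckr_images/` and `data/sflckr_segmentations/` are present locally, as referenced in the `Examples` constructor.

from torch.utils.data import DataLoader
from taming.data.sflckr import Examples  # import path as laid out in the repo

dataset = Examples(size=256)             # rescale shortest side to 256, then center-crop
loader = DataLoader(dataset, batch_size=4, shuffle=True)

batch = next(iter(loader))
print(batch["image"].shape)          # torch.Size([4, 256, 256, 3]), float32 in [-1, 1], HWC layout
print(batch["segmentation"].shape)   # torch.Size([4, 256, 256, 182]), one-hot over n_labels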
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/results.py DELETED
@@ -1,760 +0,0 @@
1
- # results.py
2
- from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator
3
- import pprint
4
- from weakref import ref as wkref
5
- from typing import Tuple, Any
6
-
7
- str_type: Tuple[type, ...] = (str, bytes)
8
- _generator_type = type((_ for _ in ()))
9
-
10
-
11
- class _ParseResultsWithOffset:
12
- __slots__ = ["tup"]
13
-
14
- def __init__(self, p1, p2):
15
- self.tup = (p1, p2)
16
-
17
- def __getitem__(self, i):
18
- return self.tup[i]
19
-
20
- def __getstate__(self):
21
- return self.tup
22
-
23
- def __setstate__(self, *args):
24
- self.tup = args[0]
25
-
26
-
27
- class ParseResults:
28
- """Structured parse results, to provide multiple means of access to
29
- the parsed data:
30
-
31
- - as a list (``len(results)``)
32
- - by list index (``results[0], results[1]``, etc.)
33
- - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
34
-
35
- Example::
36
-
37
- integer = Word(nums)
38
- date_str = (integer.set_results_name("year") + '/'
39
- + integer.set_results_name("month") + '/'
40
- + integer.set_results_name("day"))
41
- # equivalent form:
42
- # date_str = (integer("year") + '/'
43
- # + integer("month") + '/'
44
- # + integer("day"))
45
-
46
- # parse_string returns a ParseResults object
47
- result = date_str.parse_string("1999/12/31")
48
-
49
- def test(s, fn=repr):
50
- print("{} -> {}".format(s, fn(eval(s))))
51
- test("list(result)")
52
- test("result[0]")
53
- test("result['month']")
54
- test("result.day")
55
- test("'month' in result")
56
- test("'minutes' in result")
57
- test("result.dump()", str)
58
-
59
- prints::
60
-
61
- list(result) -> ['1999', '/', '12', '/', '31']
62
- result[0] -> '1999'
63
- result['month'] -> '12'
64
- result.day -> '31'
65
- 'month' in result -> True
66
- 'minutes' in result -> False
67
- result.dump() -> ['1999', '/', '12', '/', '31']
68
- - day: '31'
69
- - month: '12'
70
- - year: '1999'
71
- """
72
-
73
- _null_values: Tuple[Any, ...] = (None, [], "", ())
74
-
75
- __slots__ = [
76
- "_name",
77
- "_parent",
78
- "_all_names",
79
- "_modal",
80
- "_toklist",
81
- "_tokdict",
82
- "__weakref__",
83
- ]
84
-
85
- class List(list):
86
- """
87
- Simple wrapper class to distinguish parsed list results that should be preserved
88
- as actual Python lists, instead of being converted to :class:`ParseResults`:
89
-
90
- LBRACK, RBRACK = map(pp.Suppress, "[]")
91
- element = pp.Forward()
92
- item = ppc.integer
93
- element_list = LBRACK + pp.delimited_list(element) + RBRACK
94
-
95
- # add parse actions to convert from ParseResults to actual Python collection types
96
- def as_python_list(t):
97
- return pp.ParseResults.List(t.as_list())
98
- element_list.add_parse_action(as_python_list)
99
-
100
- element <<= item | element_list
101
-
102
- element.run_tests('''
103
- 100
104
- [2,3,4]
105
- [[2, 1],3,4]
106
- [(2, 1),3,4]
107
- (2,3,4)
108
- ''', post_parse=lambda s, r: (r[0], type(r[0])))
109
-
110
- prints:
111
-
112
- 100
113
- (100, <class 'int'>)
114
-
115
- [2,3,4]
116
- ([2, 3, 4], <class 'list'>)
117
-
118
- [[2, 1],3,4]
119
- ([[2, 1], 3, 4], <class 'list'>)
120
-
121
- (Used internally by :class:`Group` when `aslist=True`.)
122
- """
123
-
124
- def __new__(cls, contained=None):
125
- if contained is None:
126
- contained = []
127
-
128
- if not isinstance(contained, list):
129
- raise TypeError(
130
- "{} may only be constructed with a list,"
131
- " not {}".format(cls.__name__, type(contained).__name__)
132
- )
133
-
134
- return list.__new__(cls)
135
-
136
- def __new__(cls, toklist=None, name=None, **kwargs):
137
- if isinstance(toklist, ParseResults):
138
- return toklist
139
- self = object.__new__(cls)
140
- self._name = None
141
- self._parent = None
142
- self._all_names = set()
143
-
144
- if toklist is None:
145
- self._toklist = []
146
- elif isinstance(toklist, (list, _generator_type)):
147
- self._toklist = (
148
- [toklist[:]]
149
- if isinstance(toklist, ParseResults.List)
150
- else list(toklist)
151
- )
152
- else:
153
- self._toklist = [toklist]
154
- self._tokdict = dict()
155
- return self
156
-
157
- # Performance tuning: we construct a *lot* of these, so keep this
158
- # constructor as small and fast as possible
159
- def __init__(
160
- self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
161
- ):
162
- self._modal = modal
163
- if name is not None and name != "":
164
- if isinstance(name, int):
165
- name = str(name)
166
- if not modal:
167
- self._all_names = {name}
168
- self._name = name
169
- if toklist not in self._null_values:
170
- if isinstance(toklist, (str_type, type)):
171
- toklist = [toklist]
172
- if asList:
173
- if isinstance(toklist, ParseResults):
174
- self[name] = _ParseResultsWithOffset(
175
- ParseResults(toklist._toklist), 0
176
- )
177
- else:
178
- self[name] = _ParseResultsWithOffset(
179
- ParseResults(toklist[0]), 0
180
- )
181
- self[name]._name = name
182
- else:
183
- try:
184
- self[name] = toklist[0]
185
- except (KeyError, TypeError, IndexError):
186
- if toklist is not self:
187
- self[name] = toklist
188
- else:
189
- self._name = name
190
-
191
- def __getitem__(self, i):
192
- if isinstance(i, (int, slice)):
193
- return self._toklist[i]
194
- else:
195
- if i not in self._all_names:
196
- return self._tokdict[i][-1][0]
197
- else:
198
- return ParseResults([v[0] for v in self._tokdict[i]])
199
-
200
- def __setitem__(self, k, v, isinstance=isinstance):
201
- if isinstance(v, _ParseResultsWithOffset):
202
- self._tokdict[k] = self._tokdict.get(k, list()) + [v]
203
- sub = v[0]
204
- elif isinstance(k, (int, slice)):
205
- self._toklist[k] = v
206
- sub = v
207
- else:
208
- self._tokdict[k] = self._tokdict.get(k, list()) + [
209
- _ParseResultsWithOffset(v, 0)
210
- ]
211
- sub = v
212
- if isinstance(sub, ParseResults):
213
- sub._parent = wkref(self)
214
-
215
- def __delitem__(self, i):
216
- if isinstance(i, (int, slice)):
217
- mylen = len(self._toklist)
218
- del self._toklist[i]
219
-
220
- # convert int to slice
221
- if isinstance(i, int):
222
- if i < 0:
223
- i += mylen
224
- i = slice(i, i + 1)
225
- # get removed indices
226
- removed = list(range(*i.indices(mylen)))
227
- removed.reverse()
228
- # fixup indices in token dictionary
229
- for name, occurrences in self._tokdict.items():
230
- for j in removed:
231
- for k, (value, position) in enumerate(occurrences):
232
- occurrences[k] = _ParseResultsWithOffset(
233
- value, position - (position > j)
234
- )
235
- else:
236
- del self._tokdict[i]
237
-
238
- def __contains__(self, k) -> bool:
239
-        return k in self._tokdict
-
-    def __len__(self) -> int:
-        return len(self._toklist)
-
-    def __bool__(self) -> bool:
-        return not not (self._toklist or self._tokdict)
-
-    def __iter__(self) -> Iterator:
-        return iter(self._toklist)
-
-    def __reversed__(self) -> Iterator:
-        return iter(self._toklist[::-1])
-
-    def keys(self):
-        return iter(self._tokdict)
-
-    def values(self):
-        return (self[k] for k in self.keys())
-
-    def items(self):
-        return ((k, self[k]) for k in self.keys())
-
-    def haskeys(self) -> bool:
-        """
-        Since ``keys()`` returns an iterator, this method is helpful in bypassing
-        code that looks for the existence of any defined results names."""
-        return bool(self._tokdict)
-
-    def pop(self, *args, **kwargs):
-        """
-        Removes and returns item at specified index (default= ``last``).
-        Supports both ``list`` and ``dict`` semantics for ``pop()``. If
-        passed no argument or an integer argument, it will use ``list``
-        semantics and pop tokens from the list of parsed tokens. If passed
-        a non-integer argument (most likely a string), it will use ``dict``
-        semantics and pop the corresponding value from any defined results
-        names. A second default return value argument is supported, just as in
-        ``dict.pop()``.
-
-        Example::
-
-            numlist = Word(nums)[...]
-            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
-
-            def remove_first(tokens):
-                tokens.pop(0)
-            numlist.add_parse_action(remove_first)
-            print(numlist.parse_string("0 123 321")) # -> ['123', '321']
-
-            label = Word(alphas)
-            patt = label("LABEL") + Word(nums)[1, ...]
-            print(patt.parse_string("AAB 123 321").dump())
-
-            # Use pop() in a parse action to remove named result (note that corresponding value is not
-            # removed from list form of results)
-            def remove_LABEL(tokens):
-                tokens.pop("LABEL")
-                return tokens
-            patt.add_parse_action(remove_LABEL)
-            print(patt.parse_string("AAB 123 321").dump())
-
-        prints::
-
-            ['AAB', '123', '321']
-            - LABEL: 'AAB'
-
-            ['AAB', '123', '321']
-        """
-        if not args:
-            args = [-1]
-        for k, v in kwargs.items():
-            if k == "default":
-                args = (args[0], v)
-            else:
-                raise TypeError(
-                    "pop() got an unexpected keyword argument {!r}".format(k)
-                )
-        if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
-            index = args[0]
-            ret = self[index]
-            del self[index]
-            return ret
-        else:
-            defaultvalue = args[1]
-            return defaultvalue
-
-    def get(self, key, default_value=None):
-        """
-        Returns named result matching the given key, or if there is no
-        such name, then returns the given ``default_value`` or ``None`` if no
-        ``default_value`` is specified.
-
-        Similar to ``dict.get()``.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            result = date_str.parse_string("1999/12/31")
-            print(result.get("year")) # -> '1999'
-            print(result.get("hour", "not specified")) # -> 'not specified'
-            print(result.get("hour")) # -> None
-        """
-        if key in self:
-            return self[key]
-        else:
-            return default_value
-
-    def insert(self, index, ins_string):
-        """
-        Inserts new element at location index in the list of parsed tokens.
-
-        Similar to ``list.insert()``.
-
-        Example::
-
-            numlist = Word(nums)[...]
-            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
-
-            # use a parse action to insert the parse location in the front of the parsed results
-            def insert_locn(locn, tokens):
-                tokens.insert(0, locn)
-            numlist.add_parse_action(insert_locn)
-            print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
-        """
-        self._toklist.insert(index, ins_string)
-        # fixup indices in token dictionary
-        for name, occurrences in self._tokdict.items():
-            for k, (value, position) in enumerate(occurrences):
-                occurrences[k] = _ParseResultsWithOffset(
-                    value, position + (position > index)
-                )
-
-    def append(self, item):
-        """
-        Add single element to end of ``ParseResults`` list of elements.
-
-        Example::
-
-            numlist = Word(nums)[...]
-            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
-
-            # use a parse action to compute the sum of the parsed integers, and add it to the end
-            def append_sum(tokens):
-                tokens.append(sum(map(int, tokens)))
-            numlist.add_parse_action(append_sum)
-            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
-        """
-        self._toklist.append(item)
-
-    def extend(self, itemseq):
-        """
-        Add sequence of elements to end of ``ParseResults`` list of elements.
-
-        Example::
-
-            patt = Word(alphas)[1, ...]
-
-            # use a parse action to append the reverse of the matched strings, to make a palindrome
-            def make_palindrome(tokens):
-                tokens.extend(reversed([t[::-1] for t in tokens]))
-                return ''.join(tokens)
-            patt.add_parse_action(make_palindrome)
-            print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
-        """
-        if isinstance(itemseq, ParseResults):
-            self.__iadd__(itemseq)
-        else:
-            self._toklist.extend(itemseq)
-
-    def clear(self):
-        """
-        Clear all elements and results names.
-        """
-        del self._toklist[:]
-        self._tokdict.clear()
-
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            if name.startswith("__"):
-                raise AttributeError(name)
-            return ""
-
-    def __add__(self, other) -> "ParseResults":
-        ret = self.copy()
-        ret += other
-        return ret
-
-    def __iadd__(self, other) -> "ParseResults":
-        if other._tokdict:
-            offset = len(self._toklist)
-            addoffset = lambda a: offset if a < 0 else a + offset
-            otheritems = other._tokdict.items()
-            otherdictitems = [
-                (k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
-                for k, vlist in otheritems
-                for v in vlist
-            ]
-            for k, v in otherdictitems:
-                self[k] = v
-                if isinstance(v[0], ParseResults):
-                    v[0]._parent = wkref(self)
-
-        self._toklist += other._toklist
-        self._all_names |= other._all_names
-        return self
-
-    def __radd__(self, other) -> "ParseResults":
-        if isinstance(other, int) and other == 0:
-            # useful for merging many ParseResults using sum() builtin
-            return self.copy()
-        else:
-            # this may raise a TypeError - so be it
-            return other + self
-
-    def __repr__(self) -> str:
-        return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict())
-
-    def __str__(self) -> str:
-        return (
-            "["
-            + ", ".join(
-                [
-                    str(i) if isinstance(i, ParseResults) else repr(i)
-                    for i in self._toklist
-                ]
-            )
-            + "]"
-        )
-
-    def _asStringList(self, sep=""):
-        out = []
-        for item in self._toklist:
-            if out and sep:
-                out.append(sep)
-            if isinstance(item, ParseResults):
-                out += item._asStringList()
-            else:
-                out.append(str(item))
-        return out
-
-    def as_list(self) -> list:
-        """
-        Returns the parse results as a nested list of matching tokens, all converted to strings.
-
-        Example::
-
-            patt = Word(alphas)[1, ...]
-            result = patt.parse_string("sldkj lsdkj sldkj")
-            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
-            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
-
-            # Use as_list() to create an actual list
-            result_list = result.as_list()
-            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
-        """
-        return [
-            res.as_list() if isinstance(res, ParseResults) else res
-            for res in self._toklist
-        ]
-
-    def as_dict(self) -> dict:
-        """
-        Returns the named parse results as a nested dictionary.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            result = date_str.parse_string('12/31/1999')
-            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
-
-            result_dict = result.as_dict()
-            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
-
-            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
-            import json
-            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
-            print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
-        """
-
-        def to_item(obj):
-            if isinstance(obj, ParseResults):
-                return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
-            else:
-                return obj
-
-        return dict((k, to_item(v)) for k, v in self.items())
-
-    def copy(self) -> "ParseResults":
-        """
-        Returns a new copy of a :class:`ParseResults` object.
-        """
-        ret = ParseResults(self._toklist)
-        ret._tokdict = self._tokdict.copy()
-        ret._parent = self._parent
-        ret._all_names |= self._all_names
-        ret._name = self._name
-        return ret
-
-    def get_name(self):
-        r"""
-        Returns the results name for this token expression. Useful when several
-        different expressions might match at a particular location.
-
-        Example::
-
-            integer = Word(nums)
-            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
-            house_number_expr = Suppress('#') + Word(nums, alphanums)
-            user_data = (Group(house_number_expr)("house_number")
-                        | Group(ssn_expr)("ssn")
-                        | Group(integer)("age"))
-            user_info = user_data[1, ...]
-
-            result = user_info.parse_string("22 111-22-3333 #221B")
-            for item in result:
-                print(item.get_name(), ':', item[0])
-
-        prints::
-
-            age : 22
-            ssn : 111-22-3333
-            house_number : 221B
-        """
-        if self._name:
-            return self._name
-        elif self._parent:
-            par = self._parent()
-
-            def find_in_parent(sub):
-                return next(
-                    (
-                        k
-                        for k, vlist in par._tokdict.items()
-                        for v, loc in vlist
-                        if sub is v
-                    ),
-                    None,
-                )
-
-            return find_in_parent(self) if par else None
-        elif (
-            len(self) == 1
-            and len(self._tokdict) == 1
-            and next(iter(self._tokdict.values()))[0][1] in (0, -1)
-        ):
-            return next(iter(self._tokdict.keys()))
-        else:
-            return None
-
-    def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
-        """
-        Diagnostic method for listing out the contents of
-        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
-        that this string can be embedded in a nested display of other data.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            result = date_str.parse_string('1999/12/31')
-            print(result.dump())
-
-        prints::
-
-            ['1999', '/', '12', '/', '31']
-            - day: '31'
-            - month: '12'
-            - year: '1999'
-        """
-        out = []
-        NL = "\n"
-        out.append(indent + str(self.as_list()) if include_list else "")
-
-        if full:
-            if self.haskeys():
-                items = sorted((str(k), v) for k, v in self.items())
-                for k, v in items:
-                    if out:
-                        out.append(NL)
-                    out.append("{}{}- {}: ".format(indent, (" " * _depth), k))
-                    if isinstance(v, ParseResults):
-                        if v:
-                            out.append(
-                                v.dump(
-                                    indent=indent,
-                                    full=full,
-                                    include_list=include_list,
-                                    _depth=_depth + 1,
-                                )
-                            )
-                        else:
-                            out.append(str(v))
-                    else:
-                        out.append(repr(v))
-            if any(isinstance(vv, ParseResults) for vv in self):
-                v = self
-                for i, vv in enumerate(v):
-                    if isinstance(vv, ParseResults):
-                        out.append(
-                            "\n{}{}[{}]:\n{}{}{}".format(
-                                indent,
-                                (" " * (_depth)),
-                                i,
-                                indent,
-                                (" " * (_depth + 1)),
-                                vv.dump(
-                                    indent=indent,
-                                    full=full,
-                                    include_list=include_list,
-                                    _depth=_depth + 1,
-                                ),
-                            )
-                        )
-                    else:
-                        out.append(
-                            "\n%s%s[%d]:\n%s%s%s"
-                            % (
-                                indent,
-                                (" " * (_depth)),
-                                i,
-                                indent,
-                                (" " * (_depth + 1)),
-                                str(vv),
-                            )
-                        )
-
-        return "".join(out)
-
-    def pprint(self, *args, **kwargs):
-        """
-        Pretty-printer for parsed results as a list, using the
-        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
-        Accepts additional positional or keyword args as defined for
-        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
-
-        Example::
-
-            ident = Word(alphas, alphanums)
-            num = Word(nums)
-            func = Forward()
-            term = ident | num | Group('(' + func + ')')
-            func <<= ident + Group(Optional(delimited_list(term)))
-            result = func.parse_string("fna a,b,(fnb c,d,200),100")
-            result.pprint(width=40)
-
-        prints::
-
-            ['fna',
-             ['a',
-              'b',
-              ['(', 'fnb', ['c', 'd', '200'], ')'],
-              '100']]
-        """
-        pprint.pprint(self.as_list(), *args, **kwargs)
-
-    # add support for pickle protocol
-    def __getstate__(self):
-        return (
-            self._toklist,
-            (
-                self._tokdict.copy(),
-                self._parent is not None and self._parent() or None,
-                self._all_names,
-                self._name,
-            ),
-        )
-
-    def __setstate__(self, state):
-        self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
-        self._all_names = set(inAccumNames)
-        if par is not None:
-            self._parent = wkref(par)
-        else:
-            self._parent = None
-
-    def __getnewargs__(self):
-        return self._toklist, self._name
-
-    def __dir__(self):
-        return dir(type(self)) + list(self.keys())
-
-    @classmethod
-    def from_dict(cls, other, name=None) -> "ParseResults":
-        """
-        Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
-        name-value relations as results names. If an optional ``name`` argument is
-        given, a nested ``ParseResults`` will be returned.
-        """
-
-        def is_iterable(obj):
-            try:
-                iter(obj)
-            except Exception:
-                return False
-            else:
-                return not isinstance(obj, str_type)
-
-        ret = cls([])
-        for k, v in other.items():
-            if isinstance(v, Mapping):
-                ret += cls.from_dict(v, name=k)
-            else:
-                ret += cls([v], name=k, asList=is_iterable(v))
-        if name is not None:
-            ret = cls([ret], name=name)
-        return ret
-
-    asList = as_list
-    asDict = as_dict
-    getName = get_name
-
-
- MutableMapping.register(ParseResults)
- MutableSequence.register(ParseResults)
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/packages/__init__.py DELETED
File without changes
spaces/BilalSardar/Lyrics-Text_to_music/TMIDI.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/count.h DELETED
@@ -1,80 +0,0 @@
- /******************************************************************************
-  * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
-  *
-  * Redistribution and use in source and binary forms, with or without
-  * modification, are permitted provided that the following conditions are met:
-  *     * Redistributions of source code must retain the above copyright
-  *       notice, this list of conditions and the following disclaimer.
-  *     * Redistributions in binary form must reproduce the above copyright
-  *       notice, this list of conditions and the following disclaimer in the
-  *       documentation and/or other materials provided with the distribution.
-  *     * Neither the name of the NVIDIA CORPORATION nor the
-  *       names of its contributors may be used to endorse or promote products
-  *       derived from this software without specific prior written permission.
-  *
-  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
-  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  *
-  ******************************************************************************/
- #pragma once
-
-
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
- #include <thrust/system/cuda/config.h>
-
- #include <thrust/system/cuda/detail/util.h>
- #include <thrust/system/cuda/detail/reduce.h>
- #include <thrust/distance.h>
-
- namespace thrust
- {
- namespace cuda_cub {
-
- template <class Derived,
-           class InputIt,
-           class UnaryPred>
- typename iterator_traits<InputIt>::difference_type __host__ __device__
- count_if(execution_policy<Derived> &policy,
-          InputIt first,
-          InputIt last,
-          UnaryPred unary_pred)
- {
-   typedef typename iterator_traits<InputIt>::difference_type size_type;
-   typedef transform_input_iterator_t<size_type,
-                                      InputIt,
-                                      UnaryPred>
-       flag_iterator_t;
-
-   return cuda_cub::reduce_n(policy,
-                             flag_iterator_t(first, unary_pred),
-                             thrust::distance(first, last),
-                             size_type(0),
-                             plus<size_type>());
- }
-
- template <class Derived,
-           class InputIt,
-           class Value>
- typename iterator_traits<InputIt>::difference_type __host__ __device__
- count(execution_policy<Derived> &policy,
-       InputIt first,
-       InputIt last,
-       Value const & value)
- {
-   return cuda_cub::count_if(policy,
-                             first,
-                             last,
-                             thrust::detail::equal_to_value<Value>(value));
- }
-
- } // namespace cuda_cub
- } // end namespace thrust
- #endif
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/transform_reduce.h DELETED
@@ -1,68 +0,0 @@
- /******************************************************************************
-  * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
-  *
-  * Redistribution and use in source and binary forms, with or without
-  * modification, are permitted provided that the following conditions are met:
-  *     * Redistributions of source code must retain the above copyright
-  *       notice, this list of conditions and the following disclaimer.
-  *     * Redistributions in binary form must reproduce the above copyright
-  *       notice, this list of conditions and the following disclaimer in the
-  *       documentation and/or other materials provided with the distribution.
-  *     * Neither the name of the NVIDIA CORPORATION nor the
-  *       names of its contributors may be used to endorse or promote products
-  *       derived from this software without specific prior written permission.
-  *
-  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
-  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  *
-  ******************************************************************************/
- #pragma once
-
-
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
- #include <iterator>
- #include <thrust/system/cuda/detail/reduce.h>
- #include <thrust/distance.h>
-
- namespace thrust
- {
- namespace cuda_cub {
-
- template <class Derived,
-           class InputIt,
-           class TransformOp,
-           class T,
-           class ReduceOp>
- T __host__ __device__
- transform_reduce(execution_policy<Derived> &policy,
-                  InputIt                    first,
-                  InputIt                    last,
-                  TransformOp                transform_op,
-                  T                          init,
-                  ReduceOp                   reduce_op)
- {
-   typedef typename iterator_traits<InputIt>::difference_type size_type;
-   size_type num_items = static_cast<size_type>(thrust::distance(first, last));
-   typedef transform_input_iterator_t<T,
-                                      InputIt,
-                                      TransformOp>
-       transformed_iterator_t;
-
-   return cuda_cub::reduce_n(policy,
-                             transformed_iterator_t(first, transform_op),
-                             num_items,
-                             init,
-                             reduce_op);
- }
-
- } // namespace cuda_cub
- } // end namespace thrust
- #endif
spaces/CVPR/monoscene_lite/monoscene/app.py DELETED
@@ -1,138 +0,0 @@
- from pytorch_lightning import Trainer
- from monoscene.models.monoscene import MonoScene
- from monoscene.data.NYU.nyu_dm import NYUDataModule
- from monoscene.data.semantic_kitti.kitti_dm import KittiDataModule
- from monoscene.data.kitti_360.kitti_360_dm import Kitti360DataModule
- # import hydra
- from omegaconf import DictConfig
- import torch
- import numpy as np
- import os
- from hydra.utils import get_original_cwd
- import gradio as gr
- import numpy as np
- import plotly.express as px
- import pandas as pd
-
-
- # @hydra.main(config_name="../config/monoscene.yaml")
- def plot(input_img):
-     torch.set_grad_enabled(False)
-
-     # Setup dataloader
-     # if config.dataset == "kitti" or config.dataset == "kitti_360":
-     feature = 64
-     project_scale = 2
-     full_scene_size = (256, 256, 32)
-
-     # if config.dataset == "kitti":
-     #     data_module = KittiDataModule(
-     #         root=config.kitti_root,
-     #         preprocess_root=config.kitti_preprocess_root,
-     #         frustum_size=config.frustum_size,
-     #         batch_size=int(config.batch_size / config.n_gpus),
-     #         num_workers=int(config.num_workers_per_gpu * config.n_gpus),
-     #     )
-     #     data_module.setup()
-     #     data_loader = data_module.val_dataloader()
-     #     # data_loader = data_module.test_dataloader() # use this if you want to infer on test set
-     # else:
-     #     data_module = Kitti360DataModule(
-     #         root=config.kitti_360_root,
-     #         sequences=[config.kitti_360_sequence],
-     #         n_scans=2000,
-     #         batch_size=1,
-     #         num_workers=3,
-     #     )
-     #     data_module.setup()
-     #     data_loader = data_module.dataloader()
-
-     # elif config.dataset == "NYU":
-     #     project_scale = 1
-     #     feature = 200
-     #     full_scene_size = (60, 36, 60)
-     #     data_module = NYUDataModule(
-     #         root=config.NYU_root,
-     #         preprocess_root=config.NYU_preprocess_root,
-     #         n_relations=config.n_relations,
-     #         frustum_size=config.frustum_size,
-     #         batch_size=int(config.batch_size / config.n_gpus),
-     #         num_workers=int(config.num_workers_per_gpu * config.n_gpus),
-     #     )
-     #     data_module.setup()
-     #     data_loader = data_module.val_dataloader()
-     #     # data_loader = data_module.test_dataloader() # use this if you want to infer on test set
-     # else:
-     #     print("dataset not support")
-
-     # Load pretrained models
-     # if config.dataset == "NYU":
-     #     model_path = os.path.join(
-     #         get_original_cwd(), "trained_models", "monoscene_nyu.ckpt"
-     #     )
-     # else:
-     #     model_path = os.path.join(
-     #         get_original_cwd(), "trained_models", "monoscene_kitti.ckpt"
-     #     )
-     model_path = "trained_models/monoscene_kitti.ckpt"
-
-     model = MonoScene.load_from_checkpoint(
-         model_path,
-         feature=feature,
-         project_scale=project_scale,
-         fp_loss=False,
-         full_scene_size=full_scene_size,
-     )
-     model.cuda()
-     model.eval()
-
-     print(input_img.shape)
-
-     x = np.arange(12).reshape(4, 3) / 12
-     data = pd.DataFrame(data=x, columns=['x', 'y', 'z'])
-     fig = px.scatter_3d(data, x="x", y="y", z="z")
-     return fig
-
- demo = gr.Interface(plot, gr.Image(shape=(200, 200)), gr.Plot())
- demo.launch()
-
-
-
- # Save prediction and additional data
- # to draw the viewing frustum and remove scene outside the room for NYUv2
- # output_path = os.path.join(config.output_path, config.dataset)
- # with torch.no_grad():
- #     for batch in tqdm(data_loader):
- #         batch["img"] = batch["img"].cuda()
- #         pred = model(batch)
- #         y_pred = torch.softmax(pred["ssc_logit"], dim=1).detach().cpu().numpy()
- #         y_pred = np.argmax(y_pred, axis=1)
- #         for i in range(config.batch_size):
- #             out_dict = {"y_pred": y_pred[i].astype(np.uint16)}
- #             if "target" in batch:
- #                 out_dict["target"] = (
- #                     batch["target"][i].detach().cpu().numpy().astype(np.uint16)
- #                 )
-
- #             if config.dataset == "NYU":
- #                 write_path = output_path
- #                 filepath = os.path.join(write_path, batch["name"][i] + ".pkl")
- #                 out_dict["cam_pose"] = batch["cam_pose"][i].detach().cpu().numpy()
- #                 out_dict["vox_origin"] = (
- #                     batch["vox_origin"][i].detach().cpu().numpy()
- #                 )
- #             else:
- #                 write_path = os.path.join(output_path, batch["sequence"][i])
- #                 filepath = os.path.join(write_path, batch["frame_id"][i] + ".pkl")
- #                 out_dict["fov_mask_1"] = (
- #                     batch["fov_mask_1"][i].detach().cpu().numpy()
- #                 )
- #                 out_dict["cam_k"] = batch["cam_k"][i].detach().cpu().numpy()
- #                 out_dict["T_velo_2_cam"] = (
- #                     batch["T_velo_2_cam"][i].detach().cpu().numpy()
- #                 )
-
- #             os.makedirs(write_path, exist_ok=True)
- #             with open(filepath, "wb") as handle:
- #                 pickle.dump(out_dict, handle)
- #                 print("wrote to", filepath)
spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py DELETED
@@ -1,29 +0,0 @@
- from .mask_rcnn_R_50_FPN_100ep_LSJ import (
-     dataloader,
-     lr_multiplier,
-     model,
-     optimizer,
-     train,
- )
- from detectron2.config import LazyCall as L
- from detectron2.modeling.backbone import RegNet
- from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
-
- # Config source:
- # https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py  # noqa
- model.backbone.bottom_up = L(RegNet)(
-     stem_class=SimpleStem,
-     stem_width=32,
-     block_class=ResBottleneckBlock,
-     depth=23,
-     w_a=38.65,
-     w_0=96,
-     w_m=2.43,
-     group_width=40,
-     norm="SyncBN",
-     out_features=["s1", "s2", "s3", "s4"],
- )
- model.pixel_std = [57.375, 57.120, 58.395]
-
- # RegNets benefit from enabling cudnn benchmark mode
- train.cudnn_benchmark = True
spaces/ChihChiu29/mychatbot/Dockerfile DELETED
@@ -1,24 +0,0 @@
- FROM python:3.9
-
- WORKDIR /code
-
- COPY ./requirements.txt /code/requirements.txt
-
- # For hugging face
- RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
- # For gpt4free
- RUN pip install gpt4free
-
- RUN useradd -m -u 1000 user
-
- USER user
-
- ENV HOME=/home/user \
-     PATH=/home/user/.local/bin:$PATH
-
- WORKDIR $HOME/app
-
- COPY --chown=user . $HOME/app
-
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
spaces/CikeyQI/meme-api/meme_generator/memes/make_friend/__init__.py DELETED
@@ -1,50 +0,0 @@
- from pathlib import Path
- from typing import List
-
- from pil_utils import BuildImage, Text2Image
-
- from meme_generator import MemeArgsModel, add_meme
- from meme_generator.exception import TextOrNameNotEnough, TextOverLength
-
- img_dir = Path(__file__).parent / "images"
-
-
- def make_friend(images: List[BuildImage], texts: List[str], args: MemeArgsModel):
-     img = images[0].convert("RGBA")
-
-     if not texts and not args.user_infos:
-         raise TextOrNameNotEnough("make_friend")
-     name = texts[0] if texts else args.user_infos[0].name
-
-     bg = BuildImage.open(img_dir / "0.png")
-     frame = img.resize_width(1000)
-     frame.paste(
-         img.resize_width(250).rotate(9, expand=True),
-         (743, frame.height - 155),
-         alpha=True,
-     )
-     frame.paste(
-         img.square().resize((55, 55)).rotate(9, expand=True),
-         (836, frame.height - 278),
-         alpha=True,
-     )
-     frame.paste(bg, (0, frame.height - 1000), alpha=True)
-
-     text_img = Text2Image.from_text(name, 20, fill="white").to_image()
-     if text_img.width > 230:
-         raise TextOverLength(name)
-
-     text_img = BuildImage(text_img).rotate(9, expand=True)
-     frame.paste(text_img, (710, frame.height - 308), alpha=True)
-     return frame.save_jpg()
-
-
- add_meme(
-     "make_friend",
-     make_friend,
-     min_images=1,
-     max_images=1,
-     min_texts=0,
-     max_texts=1,
-     keywords=["交个朋友"],
- )
spaces/CofAI/chat.b4/g4f/Provider/Provider.py DELETED
@@ -1,16 +0,0 @@
- import os
- from ..typing import sha256, Dict, get_type_hints
-
- url = None
- model = None
- supports_stream = False
- needs_auth = False
-
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     return
-
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join(
-         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/CofAI/sd-2.1/sd-2-1.py DELETED
@@ -1,16 +0,0 @@
- #import
- import gradio as gr
- import torch
-
- DESCRIPTION = '# Kandinsky 2.1'
- if not torch.cuda.is_available():
-     DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
-
- #description
- description = """<div>
- CofAI.SD
- </div>
- """
-
- #model
- gr.Interface.load("models/stabilityai/stable-diffusion-2-1", description=description).launch()
spaces/Cropinky/hana_hanak_houses/realesrgan/data/__init__.py DELETED
@@ -1,10 +0,0 @@
- import importlib
- from basicsr.utils import scandir
- from os import path as osp
-
- # automatically scan and import dataset modules for registry
- # scan all the files that end with '_dataset.py' under the data folder
- data_folder = osp.dirname(osp.abspath(__file__))
- dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
- # import all the dataset modules
- _dataset_modules = [importlib.import_module(f'realesrgan.data.{file_name}') for file_name in dataset_filenames]
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/loss.py DELETED
@@ -1,153 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
- """
- This file contains specific functions for computing losses on the RPN
- file
- """
-
- import torch
- from torch.nn import functional as F
- from maskrcnn_benchmark.config import cfg
-
- from .utils import concat_box_prediction_layers
-
- from ..balanced_positive_negative_sampler import BalancedPositiveNegativeSampler
- from ..utils import cat
-
- from maskrcnn_benchmark.layers import smooth_l1_loss
- from maskrcnn_benchmark.layers import iou_regress
- from maskrcnn_benchmark.modeling.matcher import Matcher
- from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
- from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
-
-
- class RPNLossComputation(object):
-     """
-     This class computes the RPN loss.
-     """
-
-     def __init__(self, proposal_matcher, fg_bg_sampler, box_coder,
-                  generate_labels_func):
-         """
-         Arguments:
-             proposal_matcher (Matcher)
-             fg_bg_sampler (BalancedPositiveNegativeSampler)
-             box_coder (BoxCoder)
-         """
-         self.proposal_matcher = proposal_matcher
-         self.fg_bg_sampler = fg_bg_sampler
-         self.box_coder = box_coder
-         self.copied_fields = []
-         self.generate_labels_func = generate_labels_func
-         self.discard_cases = ['not_visibility', 'between_thresholds']
-
-     def match_targets_to_anchors(self, anchor, target, copied_fields=[]):
-
-         match_quality_matrix = boxlist_iou(target, anchor)
-         matched_idxs = self.proposal_matcher(match_quality_matrix)
-         # RPN doesn't need any fields from target for creating the labels, so clear them all
-         target = target.copy_with_fields(copied_fields)
-         # get the targets corresponding GT for each anchor
-         # NB: need to clamp the indices because we can have a single
-         # GT in the image, and matched_idxs can be -2, which goes
-         # out of bounds
-         matched_targets = target[matched_idxs.clamp(min=0)]
-         matched_targets.add_field("matched_idxs", matched_idxs)
-         return matched_targets
-
-     def prepare_targets(self, anchors, targets):
-         labels = []
-         regression_targets = []
-
-         for anchors_per_image, targets_per_image in zip(anchors, targets):
-             matched_targets = self.match_targets_to_anchors(
-                 anchors_per_image, targets_per_image, self.copied_fields
-             )
-
-             matched_idxs = matched_targets.get_field("matched_idxs")
-             labels_per_image = self.generate_labels_func(matched_targets)
-             labels_per_image = labels_per_image.to(dtype=torch.float32)
-
-             # Background (negative examples)
-             bg_indices = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
-             labels_per_image[bg_indices] = 0
-
-             # discard anchors that go out of the boundaries of the image
-             if "not_visibility" in self.discard_cases:
-                 labels_per_image[~anchors_per_image.get_field("visibility")] = -1
-
-             # discard indices that are between thresholds
-             if "between_thresholds" in self.discard_cases:
-                 inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS
-                 labels_per_image[inds_to_discard] = -1
-             regression_targets_per_image = matched_targets.bbox
-             labels.append(labels_per_image)
-             regression_targets.append(regression_targets_per_image)
-
-         return labels, regression_targets
-
-
-     def __call__(self, anchors, objectness, box_regression, targets):
-         """
-         Arguments:
-             anchors (list[BoxList])
-             objectness (list[Tensor])
-             box_regression (list[Tensor])
-             targets (list[BoxList])
-
-         Returns:
-             objectness_loss (Tensor)
-             box_loss (Tensor
-         """
-         anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
-
-         labels, regression_targets = self.prepare_targets(anchors, targets)
-         sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
-         sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
-         sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)
-
-         sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
-         objectness, box_regression = \
-             concat_box_prediction_layers(objectness, box_regression)
-         objectness = objectness.squeeze()  # [1041820]
-         labels = torch.cat(labels, dim=0)
-         regression_targets = torch.cat(regression_targets, dim=0)
-
-         box_loss = iou_regress(
-             box_regression[sampled_pos_inds],
-             regression_targets[sampled_pos_inds],
-             beta=1.0 / 9,
-             size_average=False,
-         ) / (sampled_inds.numel())
-
-         box_loss *= cfg.MODEL.ROI_BOUNDARY_HEAD.Loss_balance
-
-         objectness_loss = F.binary_cross_entropy_with_logits(
-             objectness[sampled_inds], labels[sampled_inds]
-         )
-         return objectness_loss, box_loss
-
- # This function should be overwritten in RetinaNet 11
- def generate_rpn_labels(matched_targets):
-     matched_idxs = matched_targets.get_field("matched_idxs")
-     labels_per_image = matched_idxs >= 0
-     return labels_per_image
-
-
- def make_rpn_loss_evaluator(cfg, box_coder):
-     matcher = Matcher(
-         cfg.MODEL.RPN.FG_IOU_THRESHOLD,
-         cfg.MODEL.RPN.BG_IOU_THRESHOLD,
-         allow_low_quality_matches=True,
-     )
-
-     fg_bg_sampler = BalancedPositiveNegativeSampler(
-         cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, cfg.MODEL.RPN.POSITIVE_FRACTION
-     )
-
-     loss_evaluator = RPNLossComputation(
-         matcher,
-         fg_bg_sampler,
-         box_coder,
-         generate_rpn_labels
-     )
-     return loss_evaluator