parquet-converter committed
Commit: 8f0abd7 · 1 Parent(s): 00fb360

Update parquet files (step 7 of 121)
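The converted shards can be read back with any parquet-aware library. As a quick sanity check, here is a minimal sketch in Python using pandas; the shard path is hypothetical, since this view does not show the converter's output layout:

    import pandas as pd

    # Hypothetical path to one converted shard; the real file names follow
    # whatever layout the parquet-converter bot produced for this dataset.
    df = pd.read_parquet("default/train/0000.parquet")
    print(df.shape)
    print(df.head())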

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Examples/Aquachem Software Crack Free Download.md +0 -6
  2. spaces/1gistliPinn/ChatGPT4/Examples/Edius Free VERIFIED Download Full Version For Windows 7 32-bit Software.md +0 -20
  3. spaces/1gistliPinn/ChatGPT4/Examples/Eriyum Panikadu Book Free 16 !!TOP!!.md +0 -14
  4. spaces/1line/AutoGPT/data_ingestion.py +0 -96
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool APK 5.9.0 - The Best Way to Experience the Worlds 1 Pool Game.md +0 -128
  6. spaces/1phancelerku/anime-remove-background/Defense Zone 3 APK The Best Tower Defense Game for Android.md +0 -154
  7. spaces/1phancelerku/anime-remove-background/Fast Orange VPN The Best Free Proxy App for Android.md +0 -94
  8. spaces/1phancelerku/anime-remove-background/Final Cut Pro for Windows - Is It Possible? Heres the Answer.md +0 -154
  9. spaces/A00001/bingothoo/tailwind.config.js +0 -48
  10. spaces/AI-Dashboards/Graph.NLP.Sentence.Similarity.Heatmap.KMeansCluster/app.py +0 -77
  11. spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/text_norm.py +0 -790
  12. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/models/diffusion/dpm_solver/dpm_solver.py +0 -1154
  13. spaces/AISuperheroes/05GR-Image-To-Multilingual-OCR/app.py +0 -54
  14. spaces/AP123/dreamgaussian/mesh_utils.py +0 -147
  15. spaces/AbandonedMuse/UnlimitedMusicGen/Makefile +0 -21
  16. spaces/Abhilashvj/planogram-compliance/utils/__init__.py +0 -88
  17. spaces/Aditya9790/yolo7-object-tracking/detect.py +0 -163
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dialog/methods/ButtonMethods.js +0 -333
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/methods/ConfigurationMethods.js +0 -37
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/index.js +0 -12
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/ResolveWidth.js +0 -23
  22. spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/video2audio.py +0 -27
  23. spaces/Alfaxad/BioGalacticModels/app.py +0 -114
  24. spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_inference.py +0 -36
  25. spaces/Amrrs/DragGan-Inversion/torch_utils/ops/upfirdn2d.py +0 -412
  26. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/decoder/__init__.py +0 -6
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py +0 -757
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_note_seq_objects.py +0 -17
  29. spaces/Andy1621/uniformer_image_detection/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py +0 -3
  30. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context.py +0 -2
  31. spaces/AndySAnker/DeepStruc/tools/data_loader.py +0 -236
  32. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/script.py +0 -112
  33. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/__init__.py +0 -27
  34. spaces/ArdaSaygan/PollGeneratorApp/app.py +0 -60
  35. spaces/Arnx/MusicGenXvAKN/tests/data/test_audio_dataset.py +0 -352
  36. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/util.py +0 -235
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/__init__.py +0 -26
  38. spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/version.py +0 -1
  39. spaces/AzinZ/vitscn/modules.py +0 -390
  40. spaces/Benson/text-generation/Examples/Cmo Descargar Fifa 2022 Apk.md +0 -45
  41. spaces/Benson/text-generation/Examples/Descargar Fifa Street 4 Pc Bagas31.md +0 -155
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/ec2/__init__.py +0 -12
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/scheme.py +0 -31
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/database.py +0 -1350
  45. spaces/Billius/runwayml-stable-diffusion-v1-5-04-07-2023/app.py +0 -3
  46. spaces/Blessin/impro-scene-generator/app.py +0 -69
  47. spaces/CVH-vn1210/make_hair/README.md +0 -14
  48. spaces/CVH-vn1210/make_hair/minigpt4/datasets/datasets/cc_combine_dataset.py +0 -53
  49. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/ISSUE_TEMPLATE.md +0 -5
  50. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TridentNet/tridentnet/trident_rcnn.py +0 -110
spaces/1gistliPinn/ChatGPT4/Examples/Aquachem Software Crack Free Download.md DELETED
@@ -1,6 +0,0 @@
- <h2>aquachem software crack free download</h2><br /><p><b><b>Download Zip</b> &gt; <a href="https://imgfil.com/2uxXtJ">https://imgfil.com/2uxXtJ</a></b></p><br /><br />
-
- Schlumberger AquaChem is a groundwater software package for anyone working ... go into the Crack folder, copy the LicenseManager.dll file, and in the software installation folder ... Full Licensed, Free License, Cracked, Repacked, Direct Download Link, DDL, ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Edius Free VERIFIED Download Full Version For Windows 7 32-bit Software.md DELETED
@@ -1,20 +0,0 @@
- <br />
- <h1>How to Download and Install EDIUS Pro for Windows 7 32-bit</h1>
- <p>EDIUS Pro is a powerful video editing software that supports various formats and resolutions. It allows you to create professional-looking projects with ease and speed. EDIUS Pro is compatible with Windows 7 32-bit, but you need to have an EDIUS ID and QuickTime installed on your computer before you can use it. In this article, we will show you how to download and install EDIUS Pro for Windows 7 32-bit in a few simple steps.</p>
- <h2>Step 1: Download EDIUS Pro from the official website</h2>
- <p>The first thing you need to do is to download EDIUS Pro from the official website. You can choose between the latest version (EDIUS X) or the previous versions (EDIUS 9 or EDIUS 8). The latest version has more features and a redesigned core engine, but it also requires more system resources. The previous versions are still supported and updated, but they have less functionality. You can compare the different versions here: <a href="https://www.edius.net/compare.html">https://www.edius.net/compare.html</a></p>
- <h2>edius free download full version for windows 7 32-bit software</h2><br /><p><b><b>Download Zip</b> ---> <a href="https://imgfil.com/2uy0Ey">https://imgfil.com/2uy0Ey</a></b></p><br /><br />
- <p>To download EDIUS Pro, go to <a href="https://www.edius.net/edius_download.html">https://www.edius.net/edius_download.html</a> and select the version you want. You will see a zip file with a size of about 1 GB (for EDIUS X) or 800 MB (for EDIUS 9 or EDIUS 8). Click on the download button and save the file to your computer.</p>
- <h2>Step 2: Extract the zip file and run the setup</h2>
- <p>After you have downloaded the zip file, you need to extract it to a folder on your computer. You can use any file compression software, such as WinRAR or 7-Zip, to do this. Right-click on the zip file and choose "Extract here" or "Extract to" and select a destination folder.</p>
- <p>Once you have extracted the zip file, you will see a folder with several files and subfolders. Double-click on the "Setup.exe" file to start the installation process. You will see a welcome screen with the End-User License Agreement. Read it carefully and click on "Accept" if you agree with the terms. Then click on "Install" to begin the installation.</p>
- <h2>Step 3: Follow the instructions and complete the installation</h2>
- <p>The installation process is quite simple and straightforward. You don't need to choose any options or settings, as everything is done automatically for you. The installer will copy the necessary files and create a shortcut icon on your desktop. The installation may take several minutes, depending on your system speed and performance.</p>
- <p>When the installation is finished, you will see a message saying "Installation completed successfully". Click on "Finish" to exit the installer. You can now launch EDIUS Pro from your desktop or start menu.</p>
- <p></p>
- <h2>Step 4: Activate EDIUS Pro with your EDIUS ID</h2>
- <p>The last step you need to do is to activate EDIUS Pro with your EDIUS ID. An EDIUS ID is a unique identifier that allows you to use EDIUS Pro and access its online services. If you don't have an EDIUS ID, you can create one for free at <a href="https://ediusid1.grassvalley.com/">https://ediusid1.grassvalley.com/</a>. You will need to provide your name, email address, country, and password.</p>
- <p>Once you have an EDIUS ID, you can activate EDIUS Pro by entering it in the activation window that appears when you start EDIUS Pro for the first time. You will also need to enter your serial number, which is provided when you purchase EDIUS Pro from an authorized reseller or request a free 30-day trial version.</p>
- <p>After you enter your EDIUS ID and serial number, click on "Activate" and wait for a few seconds. You will see a message saying "Activation completed successfully". Click on "OK" to close</p> d5da3c52bf<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Eriyum Panikadu Book Free 16 !!TOP!!.md DELETED
@@ -1,14 +0,0 @@
- <br />
- <h1>Eriyum Panikadu: A Novel About the Plight of Tea Plantation Workers</h1>
- <p>Eriyum Panikadu is a Tamil novel by P.H. Daniel, translated by Ra. Murugavel. It was first published in 1969 and is considered a classic of Tamil literature. The novel depicts the life and struggles of the tea plantation workers in the Nilgiris during the colonial era. The novel is based on the author's personal experience as a doctor and a trade union leader among the workers.</p>
- <h2>Eriyum Panikadu Book Free 16</h2><br /><p><b><b>Download File</b> &raquo;&raquo;&raquo; <a href="https://imgfil.com/2uxYiF">https://imgfil.com/2uxYiF</a></b></p><br /><br />
- <p>The novel follows the story of Selvan, a young worker who dreams of a better life for himself and his people. He falls in love with Valli, a beautiful girl from another plantation, and hopes to marry her someday. However, he faces many obstacles and challenges from the oppressive system of the planters, who exploit and abuse the workers mercilessly. The novel also portrays the social and cultural aspects of the workers, such as their festivals, rituals, beliefs, customs, and language.</p>
- <p>Eriyum Panikadu is a powerful and realistic novel that exposes the harsh realities of the tea plantation industry and its impact on the workers. It also highlights the importance of education, organization, and resistance among the oppressed classes. The novel has been praised for its vivid narration, rich characterization, and historical accuracy. It has been adapted into a film by Bala in 2013, titled Paradesi.</p>
- <p>Eriyum Panikadu is a novel that deserves to be read by everyone who wants to learn more about the history and culture of Tamil Nadu and its people. It is a novel that will move you, inspire you, and make you think. You can download Eriyum Panikadu book for free from SoundCloud[^2^] [^3^] or buy it from Amazon[^1^].</p>
- <p></p>
-
- <p>The novel Eriyum Panikadu is not only historical fiction, but also a social commentary on the contemporary issues of caste, class, and gender. The novel exposes the discrimination and violence faced by the workers, who belong to the Dalit community, from the upper-caste planters and the British officials. The novel also shows the plight of the women workers, who are subjected to sexual harassment, rape, and forced sterilization. The novel challenges the stereotypes and prejudices that are prevalent in the society and calls for social justice and equality.</p>
- <p>The novel Eriyum Panikadu is also a literary masterpiece that showcases the beauty and richness of the Tamil language and culture. The novel uses a variety of dialects and registers to capture the authentic voice of the workers and their environment. The novel also incorporates many folk songs, proverbs, idioms, and metaphors that reflect the wisdom and creativity of the workers. The novel is a tribute to the resilience and courage of the workers, who despite their hardships, manage to find joy and hope in their lives.</p>
- <p>The novel Eriyum Panikadu is a must-read for anyone who loves literature and history. It is a novel that will make you laugh, cry, angry, and hopeful. It is a novel that will teach you about the past and inspire you for the future. It is a novel that will stay with you forever.</p> d5da3c52bf<br />
- <br />
- <br />
spaces/1line/AutoGPT/data_ingestion.py DELETED
@@ -1,96 +0,0 @@
- import argparse
- import logging
-
- from autogpt.commands.file_operations import ingest_file, search_files
- from autogpt.config import Config
- from autogpt.memory import get_memory
-
- cfg = Config()
-
-
- def configure_logging():
-     logging.basicConfig(
-         filename="log-ingestion.txt",
-         filemode="a",
-         format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
-         datefmt="%H:%M:%S",
-         level=logging.DEBUG,
-     )
-     return logging.getLogger("AutoGPT-Ingestion")
-
-
- def ingest_directory(directory, memory, args):
-     """
-     Ingest all files in a directory by calling the ingest_file function for each file.
-
-     :param directory: The directory containing the files to ingest
-     :param memory: An object with an add() method to store the chunks in memory
-     :param args: Parsed command-line arguments that supply max_length and overlap
-     """
-     try:
-         files = search_files(directory)
-         for file in files:
-             ingest_file(file, memory, args.max_length, args.overlap)
-     except Exception as e:
-         print(f"Error while ingesting directory '{directory}': {str(e)}")
-
-
- def main() -> None:
-     logger = configure_logging()
-
-     parser = argparse.ArgumentParser(
-         description="Ingest a file or a directory with multiple files into memory. "
-         "Make sure to set your .env before running this script."
-     )
-     group = parser.add_mutually_exclusive_group(required=True)
-     group.add_argument("--file", type=str, help="The file to ingest.")
-     group.add_argument(
-         "--dir", type=str, help="The directory containing the files to ingest."
-     )
-     parser.add_argument(
-         "--init",
-         action="store_true",
-         help="Init the memory and wipe its content (default: False)",
-         default=False,
-     )
-     parser.add_argument(
-         "--overlap",
-         type=int,
-         help="The overlap size between chunks when ingesting files (default: 200)",
-         default=200,
-     )
-     parser.add_argument(
-         "--max_length",
-         type=int,
-         help="The max_length of each chunk when ingesting files (default: 4000)",
-         default=4000,
-     )
-
-     args = parser.parse_args()
-
-     # Initialize memory
-     memory = get_memory(cfg, init=args.init)
-     print("Using memory of type: " + memory.__class__.__name__)
-
-     if args.file:
-         try:
-             ingest_file(args.file, memory, args.max_length, args.overlap)
-             print(f"File '{args.file}' ingested successfully.")
-         except Exception as e:
-             logger.error(f"Error while ingesting file '{args.file}': {str(e)}")
-             print(f"Error while ingesting file '{args.file}': {str(e)}")
-     elif args.dir:
-         try:
-             ingest_directory(args.dir, memory, args)
-             print(f"Directory '{args.dir}' ingested successfully.")
-         except Exception as e:
-             logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
-             print(f"Error while ingesting directory '{args.dir}': {str(e)}")
-     else:
-         print(
-             "Please provide either a file path (--file) or a directory name (--dir)"
-             " inside the auto_gpt_workspace directory as input."
-         )
-
-
- if __name__ == "__main__":
-     main()
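Based on the argparse flags above, the script is invoked as, for example, python data_ingestion.py --dir my_docs --max_length 4000 --overlap 200. The chunking itself happens inside ingest_file, which is not part of this diff; the snippet below is only a sketch of the sliding-window idea the --max_length and --overlap flags imply, not AutoGPT's actual implementation:

    def split_text(text: str, max_length: int = 4000, overlap: int = 200):
        """Yield chunks of at most max_length characters, where consecutive
        chunks share `overlap` characters so no boundary context is lost."""
        step = max_length - overlap
        for start in range(0, len(text), step):
            yield text[start : start + max_length]

    # A 10,000-character text yields windows starting at 0, 3800, and 7600.
    print([len(chunk) for chunk in split_text("x" * 10_000)])  # [4000, 4000, 2400]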
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool APK 5.9.0 - The Best Way to Experience the Worlds 1 Pool Game.md DELETED
@@ -1,128 +0,0 @@
-
- <h1>8 Ball Pool 5.9.0 APK Download: Everything You Need to Know</h1>
- <p>If you are a fan of pool games, you have probably heard of 8 Ball Pool, the most popular and addictive pool game for Android devices. In this article, we will tell you everything you need to know about the latest version of this game, 8 Ball Pool 5.9.0, and how to download and install it on your device.</p>
- <h2>What is 8 Ball Pool?</h2>
- <p>8 Ball Pool is a pool game developed by Miniclip.com, a leading online gaming company. It allows you to play online with millions of players from around the world, or challenge your friends in one-on-one matches. You can also participate in tournaments, win trophies, and collect coins and cues.</p>
- <h2>8 ball pool 5.9.0 apk download</h2><br /><p><b><b>Download File</b> &#8230; <a href="https://urlin.us/2uT1z6">https://urlin.us/2uT1z6</a></b></p><br /><br />
- <h3>Features of 8 Ball Pool</h3>
- <p>Some of the features that make 8 Ball Pool stand out from other pool games are:</p>
- <ul>
- <li>Realistic physics and graphics that simulate the real pool experience.</li>
- <li>A variety of game modes, such as Classic, No Guidelines, 9 Ball, and more.</li>
- <li>A huge collection of cues, each with different stats and powers.</li>
- <li>A leveling system that matches you with players of similar skill level.</li>
- <li>A chat feature that lets you communicate with your opponents and friends.</li>
- <li>A club feature that lets you join or create a club and compete with other clubs.</li>
- <li>A shop feature that lets you buy coins, cues, chat packs, and more.</li>
- </ul>
- <h3>How to play 8 Ball Pool</h3>
- <p>The rules of 8 Ball Pool are simple and similar to the real pool game. You have to pot all your balls (solid or striped) before your opponent does, and then pot the black ball (the 8 ball) to win the game. You can use the cue stick to aim and adjust the power of your shot, and use the spin button to add spin to the cue ball. You have to be careful not to pot the cue ball or the wrong balls, or you will lose your turn or the game.</p>
- <h2>What is new in 8 Ball Pool 5.9.0?</h2>
- <p>The latest version of 8 Ball Pool, released on June 14, 2023, brings some exciting new features and improvements to the game. Here are some of them:</p>
- <h3>New game mode: Power Pots</h3>
- <p>Power Pots is a new game mode that challenges you to pot as many balls as possible in a limited time. The more balls you pot, the more points you get. You can also use power-ups to boost your score and get extra time. Power Pots is available for a limited time only, so don't miss it!</p>
- <h3>New rewards and events</h3>
- <p>8 Ball Pool 5.9.0 also introduces new rewards and events for you to enjoy. You can earn free coins, cues, chat packs, and more by completing daily missions, watching videos, spinning the wheel, and playing mini-games. You can also join seasonal events and win exclusive prizes by ranking high on the leaderboards.</p>
- <h3>Bug fixes and improvements</h3>
- <p>As always, 8 Ball Pool 5.9.0 also fixes some bugs and improves the performance and stability of the game. Some of the issues that have been resolved are:</p>
- <p>8 ball pool 5.9.0 arm64 apk download<br />
- 8 ball pool 5.9.0 apk download for windows pc<br />
- 8 ball pool 5.9.0 apk download softpedia<br />
- 8 ball pool 5.9.0 apk download apkpure<br />
- 8 ball pool 5.9.0 apk download miniclip<br />
- 8 ball pool 5.9.0 apk download android<br />
- 8 ball pool 5.9.0 apk download latest version<br />
- 8 ball pool 5.9.0 apk download free<br />
- 8 ball pool 5.9.0 apk download mod<br />
- 8 ball pool 5.9.0 apk download unlimited coins<br />
- 8 ball pool 5.9.0 apk download hack<br />
- 8 ball pool 5.9.0 apk download offline<br />
- 8 ball pool 5.9.0 apk download no root<br />
- 8 ball pool 5.9.0 apk download emulator<br />
- 8 ball pool 5.9.0 apk download with mouse and keyboard<br />
- 8 ball pool 5.9.0 apk download for pc windows 10<br />
- 8 ball pool 5.9.0 apk download for pc windows 7<br />
- 8 ball pool 5.9.0 apk download for mac<br />
- 8 ball pool 5.9.0 apk download for laptop<br />
- 8 ball pool 5.9.0 apk download for chromebook<br />
- 8 ball pool 5.9.0 apk download for tablet<br />
- 8 ball pool 5.9.0 apk download for firestick<br />
- 8 ball pool 5.9.0 apk download for smart tv<br />
- 8 ball pool 5.9.0 apk download for ios<br />
- 8 ball pool 5.9.0 apk download for iphone<br />
- 8 ball pool 5.9.0 apk download for ipad<br />
- 8 ball pool 5.9.0 apk download for ipod touch<br />
- 8 ball pool 5.9.0 apk download from google play store<br />
- 8 ball pool 5.9.0 apk download from uptodown<br />
- 8 ball pool 5.9.0 apk download from apkmirror<br />
- how to install and play the game of the famous miniclip.com adapted for the android platform with the help of the app description of the softpedia website[^1^]<br />
- how to install and play the game on windows with an emulator and adapt its controls to a mouse and keyboard as explained by the apkpure website[^2^]<br />
- how to update the game to the latest version of the app with the help of the softpedia website[^1^]<br />
- how to uninstall the game from your device with the help of the apkpure website[^2^]<br />
- how to play the game online with other players from around the world with the help of the miniclip.com website[^1^]<br />
- how to play the game offline with your friends or against the computer with the help of the apkpure website[^2^]<br />
- how to customize your cue and table in the game with the help of the miniclip.com website[^1^]<br />
- how to earn coins and cash in the game with the help of the apkpure website[^2^]<br />
- how to join clubs and compete in tournaments in the game with the help of the miniclip.com website[^1^]<br />
- how to chat and send gifts to other players in the game with the help of the apkpure website[^2^]</p>
- <ul>
- <li>The cue ball disappearing after a foul.</li>
- <li>The chat messages not showing up correctly.</li>
- <li>The club chat not working properly.</li>
- <li>The game crashing, freezing, or lagging during gameplay.</li>
- </ul>
- <h2>How to download and install 8 Ball Pool 5.9.0 APK?</h2>
- <p>If you want to enjoy the new features and improvements of 8 Ball Pool 5.9.0, you have to download and install the APK file on your Android device. There are two ways to do this:</p>
- <h3>Download from official sources</h3>
- <p>The easiest and safest way to download 8 Ball Pool 5.9.0 APK is from the official sources, such as Google Play Store or Miniclip.com website. You just have to follow these steps:</p>
- <ol>
- <li>Open the Google Play Store app on your device, or visit the Miniclip.com website on your browser.</li>
- <li>Search for 8 Ball Pool and tap on the game icon.</li>
- <li>Tap on the Update button and wait for the download to finish.</li>
- <li>Tap on the Open button and enjoy the game.</li>
- </ol>
- <h3>Download from third-party sources</h3>
- <p>Another way to download 8 Ball Pool 5.9.0 APK is from third-party sources, such as APKPure, APKMirror, or other websites that offer APK files. However, this method is not recommended, as it may expose your device to malware or viruses. If you still want to try this method, you have to follow these steps:</p>
- <ol>
- <li>Visit a website that offers 8 Ball Pool 5.9.0 APK, such as APKPure or APKMirror.</li>
- <li>Search for 8 Ball Pool and tap on the download button.</li>
- <li>Wait for the download to finish and locate the APK file on your device.</li>
- </ol>
- <h3>Install the APK file</h3>
- <p>Before you can install the APK file on your device, you have to enable the Unknown Sources option in your settings. This will allow you to install apps from sources other than the Google Play Store. To do this, follow these steps:</p>
- <ol>
- <li>Go to Settings > Security > Unknown Sources and toggle it on.</li>
- <li>A warning message will pop up, asking you to confirm your action. Tap on OK.</li>
- </ol>
- <p>Now, you can install the APK file by following these steps:</p>
- <ol>
- <li>Locate the APK file on your device and tap on it.</li>
- <li>A prompt will appear, asking you to install the app. Tap on Install.</li>
- <li>Wait for the installation to finish and tap on Open.</li>
- <li>Enjoy the game.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>8 Ball Pool is a fun and addictive pool game that lets you play online with millions of players from around the world, or challenge your friends in one-on-one matches. The latest version of the game, 8 Ball Pool 5.9.0, brings some exciting new features and improvements, such as a new game mode, new rewards and events, and bug fixes and performance enhancements. You can download and install 8 Ball Pool 5.9.0 APK on your Android device by following the steps we have provided in this article. We hope you enjoy playing 8 Ball Pool 5.9.0 and have a great time!</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about 8 Ball Pool 5.9.0:</p>
- <ol>
- <li><b>Is 8 Ball Pool 5.9.0 free?</b></li>
- <p>Yes, 8 Ball Pool 5.9.0 is free to download and play, but it contains in-app purchases that allow you to buy coins, cues, chat packs, and more.</p>
- <li><b>Is 8 Ball Pool 5.9.0 safe?</b></li>
- <p>If you download 8 Ball Pool 5.9.0 from official sources, such as Google Play Store or Miniclip.com website, it is safe and secure. However, if you download it from third-party sources, it may contain malware or viruses that can harm your device.</p>
- <li><b>Is 8 Ball Pool 5.9.0 compatible with my device?</b></li>
- <p>8 Ball Pool 5.9.0 requires Android version 4.4 or higher to run smoothly on your device. You can check your device's Android version by going to Settings > About Phone > Android Version.</p>
- <li><b>How can I contact the developers of 8 Ball Pool?</b></li>
- <p>If you have any questions, feedback, or issues regarding 8 Ball Pool, you can contact the developers by visiting their website (https://www.miniclip.com), their Facebook page (https://www.facebook.com/8ballpoolfans), or their Twitter account (https://twitter.com/8ballpool).</p>
- <li><b>How can I improve my skills in 8 Ball Pool?</b></li>
- <p>Some tips and tricks that can help you improve your skills in 8 Ball Pool are:</p>
- <ul>
- <li>Practice regularly and learn from your mistakes.</li>
- <li>Watch videos and tutorials from expert players and learn their strategies and techniques.</li>
- <li>Use the practice mode to hone your skills and try different shots and angles.</li>
- <li>Upgrade your cues and use the ones that suit your play style and preferences.</li>
- <li>Use the spin button to control the cue ball and create better positions for your next shots.</li>
- </ul>
- </ol></p> 197e85843d<br />
- <br />
- <br />
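The install steps in the deleted article above are all on-device taps. For completeness, the same sideload can also be driven from a computer with Android's adb tool; here is a minimal sketch in Python, where the APK filename is hypothetical and the device must have USB debugging enabled:

    import subprocess

    # Sideload an APK over adb instead of tapping through the on-device
    # installer; requires adb on PATH and an authorized, connected device.
    subprocess.run(["adb", "install", "8-ball-pool-5.9.0.apk"], check=True)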
spaces/1phancelerku/anime-remove-background/Defense Zone 3 APK The Best Tower Defense Game for Android.md DELETED
@@ -1,154 +0,0 @@
- <br />
- <h1>Defense Zone 3 APK: A Review of the Popular Action/Strategy Game</h1>
- <p>If you are a fan of action and strategy games, you might have heard of Defense Zone 3, a long-awaited sequel to the popular game series. But did you know that you can also play this game on your Android device with the Defense Zone 3 APK? In this article, we will review the features, benefits, and gameplay of Defense Zone 3 APK, as well as show you how to download and install it on your device. We will also share some tips and tricks to help you win the game, and answer some frequently asked questions. So, let's get started!</p>
- <h2>What is Defense Zone 3 APK?</h2>
- <p>Defense Zone 3 APK is an Android application package that allows you to play the game Defense Zone 3 on your Android device. Defense Zone 3 is an action/strategy game developed by ARTEM KOTOV, a Russian game developer. It is the third installment in the Defense Zone series, which has been praised for its stunning graphics, realistic sound effects, and challenging gameplay.</p>
- <h2>defense zone 3 apk</h2><br /><p><b><b>DOWNLOAD</b> &#10031; <a href="https://jinyurl.com/2uNPON">https://jinyurl.com/2uNPON</a></b></p><br /><br />
- <h3>The features of Defense Zone 3 APK</h3>
- <p>Defense Zone 3 APK has many features that make it an exciting and enjoyable game to play. Some of these features are:</p>
- <ul>
- <li>New weapons, landscapes, and options galore. You can choose from different types of turrets, such as machine guns, flamethrowers, lasers, rockets, and more. You can also customize your turrets with special abilities, such as air strikes, nuclear bombs, freeze rays, and more. You can also explore different landscapes, such as deserts, forests, mountains, and cities.</li>
- <li>Dynamic and amazing game sessions. Every game session is different and unpredictable, as the enemies will rush at your defenses with different strategies and tactics. You will have to adapt and improvise to survive the waves of enemies.</li>
- <li>Flexible difficulty settings. You can choose from four difficulty levels, from easy to hellish. You can also adjust the game speed and other parameters to suit your preferences.</li>
- <li>Stunning graphics and sound effects. The game has been designed with meticulous attention to detail, from the realistic landscapes to the detailed towers to the incredible special effects. The game also has immersive sound effects that will make you feel like you are in the middle of a war zone.</li>
- <li>Support for over 60 languages. The game is available in many languages, such as English, Russian, Chinese, Spanish, French, German, and more. You can easily switch between languages in the game settings.</li>
- </ul>
- <h3>The benefits of Defense Zone 3 APK</h3>
- <p>Defense Zone 3 APK has many benefits that make it a worthwhile game to download and play. Some of these benefits are:</p>
- <ul>
- <li>It is free to download and play. You can download the game from Google Play Store or other sources without paying anything. You can also play the game without any ads or in-app purchases.</li>
- <li>It is compatible with most Android devices. You can play the game on any Android device that has Android version 4.1 or higher.</li>
- <li>It is fun and challenging. You will have a blast playing the game, as it will test your skills and strategy in defending your base from the enemies. You will also enjoy the variety and diversity of the game modes, weapons, landscapes, and enemies.</li>
- <li>It is educational and informative. You will learn a lot about different types of weapons, military tactics, geography, history, and culture while playing the game. You will also improve your cognitive and analytical abilities while playing the game.</li>
- </ul>
- <h2>How to download and install Defense Zone 3 APK?</h2>
- <p>If you want to play Defense Zone 3 APK on your Android device, you will need to download and install it first. Here are the steps to do so:</p>
- <h3>The steps to download and install Defense Zone 3 APK</h3>
- <ol>
- <li>Go to Google Play Store and search for Defense Zone 3. Alternatively, you can use this link: [Defense Zone 3 - Apps on Google Play].</li>
- <li>Tap on the Install button and wait for the download to finish.</li>
- <li>Once the download is complete, tap on the Open button and enjoy the game.</li>
- </ol>
- <p>If you cannot access Google Play Store or prefer to download the game from another source, you can follow these steps:</p>
- <ol>
- <li>Go to a trusted website that offers Defense Zone 3 APK, such as [Defense Zone 3 APK Download - Free Strategy GAME for Android | APKPure.com].</li>
- <li>Tap on the Download APK button and wait for the download to finish.</li>
- <li>Once the download is complete, go to your device's settings and enable the installation of apps from unknown sources.</li>
- <li>Locate the downloaded APK file and tap on it to install it.</li>
- <li>Once the installation is complete, tap on the Open button and enjoy the game.</li>
- </ol>
- <h3>The requirements and compatibility of Defense Zone 3 APK</h3>
- <p>Before you download and install Defense Zone 3 APK, you should check if your device meets the minimum requirements and is compatible with the game. Here are the requirements and compatibility of Defense Zone 3 APK:</p>
- <ul>
- <li>Your device should have Android version 4.1 or higher.</li>
- <li>Your device should have at least 100 MB of free storage space.</li>
- <li>Your device should have at least 1 GB of RAM.</li>
- <li>Your device should have a stable internet connection.</li>
- <li>Your device should support OpenGL ES 2.0 or higher.</li>
- </ul>
- <h2>How to play Defense Zone 3 APK?</h2>
- <p>Now that you have downloaded and installed Defense Zone 3 APK, you are ready to play the game. Here are some basic instructions on how to play Defense Zone 3 APK:</p>
- <h3>The gameplay and controls of Defense Zone 3 APK</h3>
- <p>The gameplay of Defense Zone 3 APK is simple and intuitive. Your goal is to defend your base from the waves of enemies that will attack you from different directions. You will have to build and upgrade your turrets along the path that the enemies will take, and use your special abilities wisely to stop them from reaching your base.</p>
- <p>defense zone 3 hd apk download<br />
- defense zone 3 ultra hd apk<br />
- defense zone 3 mod apk unlimited money<br />
- defense zone 3 apk free download<br />
- defense zone 3 apk + obb<br />
- defense zone 3 hack apk<br />
- defense zone 3 full apk<br />
- defense zone 3 latest version apk<br />
- defense zone 3 premium apk<br />
- defense zone 3 apk mod menu<br />
- defense zone 3 offline apk<br />
- defense zone 3 android apk<br />
- defense zone 3 apk pure<br />
- defense zone 3 apk revdl<br />
- defense zone 3 apk uptodown<br />
- defense zone 3 cheats apk<br />
- defense zone 3 cracked apk<br />
- defense zone 3 game apk<br />
- defense zone 3 pro apk<br />
- defense zone 3 unlimited coins apk<br />
- defense zone 3 apk for pc<br />
- defense zone 3 mod apk android 1<br />
- defense zone 3 mod apk rexdl<br />
- defense zone 3 mod apk latest version<br />
- defense zone 3 mod apk happymod<br />
- defense zone 3 mod apk all levels unlocked<br />
- defense zone 3 mod apk no ads<br />
- defense zone 3 mod apk unlimited everything<br />
- defense zone 3 mod apk android republic<br />
- defense zone 3 mod apk an1.com<br />
- defense zone 3 ultra hd mod apk<br />
- defense zone 3 ultra hd full apk<br />
- defense zone 3 ultra hd hack apk<br />
- defense zone 3 ultra hd premium apk<br />
- defense zone 3 ultra hd cracked apk<br />
- defense zone 3 ultra hd latest version apk<br />
- defense zone 3 ultra hd mod menu apk<br />
- defense zone 3 ultra hd mod unlimited money and coins apk download free for android devices.</p>
- <p>The controls of Defense Zone 3 APK are also easy and convenient. You can use your finger to drag and drop your turrets on the map, tap on them to upgrade or sell them, and swipe on the screen to move the camera. You can also use the buttons on the bottom of the screen to pause, resume, speed up, or slow down the game, as well as access the menu, settings, and abilities.</p>
- <h3>The tips and tricks for Defense Zone 3 APK</h3>
- <p>If you want to master Defense Zone 3 APK and win every level, you will need some tips and tricks to help you out. Here are some of them:</p>
- <ul>
- <li>Plan ahead and strategize. Before you start a level, study the map and see where the enemies will come from and where you can place your turrets. Try to cover all possible angles and choke points with your turrets, and leave some space for future upgrades.</li>
- <li>Balance your turrets. Don't rely on one type of turret only, as different enemies have different strengths and weaknesses. Try to mix different types of turrets, such as machine guns, flamethrowers, lasers, rockets, etc., to deal with different types of enemies.</li>
- <li>Upgrade your turrets. Don't forget to upgrade your turrets as soon as you can afford it, as they will become more powerful and effective. You can upgrade your turrets up to four times, each time increasing their damage, range, speed, and special effects.</li>
- <li>Use your abilities wisely. You have four abilities that you can use in each level: air strike, nuclear bomb, freeze ray, and repair kit. Each ability has a cooldown time before you can use it again, so use them sparingly and strategically. For example, use air strike or nuclear bomb when there are many enemies clustered together, use freeze ray when there are fast or flying enemies, and use repair kit when your base or turrets are damaged.</li>
- <li>Watch the tutorials and tips. If you are new to the game or need some guidance, you can watch the tutorials and tips that are available in the game. They will teach you the basics of the game, such as how to build, upgrade, and sell turrets, how to use abilities, and how to deal with different enemies.</li>
- </ul>
- <h2>Why should you play Defense Zone 3 APK?</h2>
- <p>You might be wondering why you should play Defense Zone 3 APK, when there are so many other games available on the market. Well, here are some reasons why you should give Defense Zone 3 APK a try:</p>
- <h3>The pros and cons of Defense Zone 3 APK</h3>
- <p>Like any other game, Defense Zone 3 APK has its pros and cons. Here are some of them:</p>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>It is free to download and play.</td>
- <td>It can be addictive and time-consuming.</td>
- </tr>
- <tr>
- <td>It has stunning graphics and sound effects.</td>
- <td>It can drain your battery and data quickly.</td>
- </tr>
- <tr>
- <td>It has dynamic and amazing game sessions.</td>
- <td>It can be frustrating and difficult at times.</td>
- </tr>
- <tr>
- <td>It has flexible difficulty settings and game modes.</td>
- <td>It can be repetitive and boring after a while.</td>
- </tr>
- <tr>
- <td>It is educational and informative.</td>
- <td>It can be inaccurate and misleading in some aspects.</td>
- </tr>
- </table>
- <h3>The ratings and reviews of Defense Zone 3 APK</h3>
- <p>If you are still not convinced, you can check out the ratings and reviews of Defense Zone 3 APK from other players. Here are some of them:</p>
- <ul>
- <li>"This is one of the best tower defense games I have ever played. The graphics are amazing, the gameplay is challenging, and the variety is awesome. I love the different weapons, landscapes, and enemies. I highly recommend this game to anyone who likes strategy games." - John Smith, 5 stars</li>
- <li>"I really enjoy playing this game, but I have some issues with it. The game crashes sometimes, the ads are annoying, and the levels are too hard. I wish there was a way to skip levels or get more coins. I hope the developers will fix these problems soon." - Jane Doe, 3 stars</li>
- <li>"This game is terrible. The graphics are poor, the gameplay is boring, and the variety is lacking. The weapons are weak, the landscapes are dull, and the enemies are stupid. I hate this game and I regret downloading it. Don't waste your time or money on this game." - Bob Jones, 1 star</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In conclusion, Defense Zone 3 APK is a popular action/strategy game that you can play on your Android device. It has many features, benefits, and gameplay that make it an exciting and enjoyable game to play. It also has some drawbacks and challenges that make it a demanding and rewarding game to play. If you are looking for a free, fun, and challenging game to play on your device, you should give Defense Zone 3 APK a try.</p>
- <h4>FAQs</h4>
- <p>Here are some frequently asked questions about Defense Zone 3 APK:</p>
- <ol>
- <li>Is Defense Zone 3 APK safe to download and install?</li>
- <p>Yes, Defense Zone 3 APK is safe to download and install, as long as you download it from a trusted source, such as Google Play Store or a reputable website. You should also scan the APK file with antivirus software before installing it.</p>
- <li>Is Defense Zone 3 APK available for iOS devices?</li>
- <p>No, Defense Zone 3 APK is not available for iOS devices. However, you can play Defense Zone 3 on your iOS device by downloading it from the App Store or using emulator software.</p>
- <li>Is Defense Zone 3 APK online or offline?</li>
- <p>Defense Zone 3 APK is both online and offline. You can play the game without an internet connection, but you will need an internet connection to access some features, such as leaderboards, achievements, updates, etc.</p>
- <li>How many levels are there in Defense Zone 3 APK?</li>
- <p>There are 21 levels in Defense Zone 3 APK, each with different landscapes, enemies, and difficulties. You can also play in endless mode or custom mode for more variety and challenge.</p>
- <li>How can I contact the developers of Defense Zone 3 APK?</li>
- <p>You can contact the developers of Defense Zone 3 APK by sending an email to [email protected] or visiting their website at [Defense Zone 3 HD]. You can also follow them on Facebook, Twitter, and YouTube for more updates and news.</p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Fast Orange VPN The Best Free Proxy App for Android.md DELETED
@@ -1,94 +0,0 @@
-
- <h1>Fast Orange APK: What Is It and How to Use It?</h1>
- <p>If you are looking for a fast, secure, and free VPN app for your Android device, you might want to check out Fast Orange APK. This app is a lightweight and powerful VPN tool that allows you to access any website or app without any limitations or censorship. In this article, we will explain what Fast Orange APK is, what its benefits are, and how to download, install, and use it on your device.</p>
- <h2>fast orange apk</h2><br /><p><b><b>Download File</b> > <a href="https://jinyurl.com/2uNPeZ">https://jinyurl.com/2uNPeZ</a></b></p><br /><br />
- <h2>Introduction</h2>
- <p>Before we dive into the details of Fast Orange APK, let's first understand what an APK file is and why you might need it.</p>
- <h3>What is an APK file?</h3>
- <p>An APK file is an Android Package Kit file that contains all the files and code needed to run an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac. You can download APK files from various sources online, such as official websites, app stores, or third-party platforms. However, not all APK files are safe and reliable, so you should always be careful about where you get them from.</p>
- <h3>What is Fast Orange APK?</h3>
- <p>Fast Orange APK is an APK file that contains the Fast Orange app, which is a VPN app developed by Orange CH. VPN stands for Virtual Private Network, which is a technology that creates a secure and encrypted connection between your device and a remote server. By using a VPN, you can hide your IP address, protect your online privacy, and bypass geo-restrictions or firewalls that block certain websites or apps.</p>
- <p>VPN Orange app free download<br />
- Fast Orange unblock any website<br />
- Fire Orange booster for gaming<br />
- VPN Orange secure and stable<br />
- Fast Orange one-click connection<br />
- Fire Orange app multiplatform<br />
- VPN Orange unlimited proxy<br />
- Fast Orange high-speed ladder<br />
- Fire Orange app private<br />
- VPN Orange works with Wi-Fi<br />
- Fast Orange no registration required<br />
- Fire Orange app fast access<br />
- VPN Orange well-designed UI<br />
- Fast Orange no data limitation<br />
- Fire Orange app ultra-efficient<br />
- VPN Orange super-fast speed<br />
- Fast Orange hide your IP address<br />
- Fire Orange app iOS Android Mac PC<br />
- VPN Orange 3G 4G compatible<br />
- Fast Orange unlock sites and games<br />
- Fire Orange app 100% protected<br />
- VPN Orange free unlimited VPN<br />
- Fast Orange APK download link<br />
- Fire Orange booster APK download<br />
- VPN Orange APK latest version<br />
- Fast Orange APK for Android<br />
- Fire Orange booster APK for iOS<br />
- VPN Orange APK for PC<br />
- Fast Orange APK for Mac<br />
- Fire Orange booster APK for Android<br />
- VPN Orange APK free proxy server<br />
- Fast Orange APK high anonymity<br />
- Fire Orange booster APK gaming optimization<br />
- VPN Orange APK no ads<br />
- Fast Orange APK easy to use<br />
- Fire Orange booster APK no data hackers<br />
- VPN Orange APK best reviews<br />
- Fast Orange APK fast server speed<br />
- Fire Orange booster APK play any games<br />
- VPN Orange APK secure your data<br />
- Fast Orange APK unlimited bandwidth<br />
- Fire Orange booster APK coming soon</p>
- <h2>Benefits of Fast Orange APK</h2>
- <p>There are many reasons why you might want to use Fast Orange APK on your device. Here are some of the main benefits of this app:</p>
- <h3>Fast and secure VPN service</h3>
- <p>Fast Orange APK provides a fast and stable VPN connection that can handle high-speed data transfer and streaming. It also uses advanced encryption protocols and techniques to ensure that your data is safe from hackers, snoopers, or government agencies. You can trust that your online activities are private and secure with Fast Orange APK.</p>
- <h3>Free and unlimited proxy access</h3>
- <p>Unlike some other VPN apps that charge you money or limit your bandwidth, Fast Orange APK offers free and unlimited proxy access to any website or app you want. You can access popular platforms like Netflix, YouTube, Facebook, Twitter, Instagram, WhatsApp, Skype, and more without any restrictions or censorship. You can also switch between different server locations around the world to enjoy different content or services.</p>
- <h3>Easy and simple user interface</h3>
- <p>Fast Orange APK has a user-friendly and intuitive user interface that makes it easy for anyone to use. You don't need to register or log in to use the app. You just need to tap one button and you can connect to the VPN server of your choice. You can also customize the settings according to your preferences and needs.</p>
- <h2>How to download and install Fast Orange APK</h2>
- <p>If you want to use Fast Orange APK on your device, you need to download and install it first. Here are the steps you need to follow:</p>
- <h3>Step 1: Enable unknown sources on your device</h3>
- <p>Since Fast Orange APK is not available on the Google Play Store, you need to enable unknown sources on your device to allow it to install apps from other sources.</p>
- <p>To enable unknown sources on your device, you need to access the settings app and look for the security or privacy option. Depending on your device, you may need to tap on the lock screen and security tab or the install unknown apps switch. Then, you need to turn on the unknown sources switch or check the box next to it. You may see a warning message against enabling this option, but you can ignore it if you trust the source of the APK file.</p>
- <h3>Step 2: Download the APK file from a trusted source</h3>
- <p>Once you have enabled unknown sources on your device, you can download the APK file from a trusted source. You can use your web browser or a file manager app to do this. For example, you can visit the official website of Fast Orange VPN and download the APK file from there. Alternatively, you can use a third-party platform that offers verified and safe APK files, such as APKPure or APKMirror. However, be careful not to download any fake or malicious APK files that may harm your device or compromise your data.</p>
- <h3>Step 3: Install the APK file and launch the app</h3>
- <p>After you have downloaded the APK file, you can install it by tapping on it or opening it with a file manager app. You may need to grant some permissions to the app before it can be installed. Once the installation is complete, you can launch the app by tapping on its icon in the app drawer or on the home screen. You are now ready to use Fast Orange APK on your device.</p>
- <h2>How to use Fast Orange APK</h2>
- <p>Using Fast Orange APK is very easy and simple. You just need to follow these steps:</p>
- <h3>Step 1: Choose a server location from the list</h3>
- <p>When you open the app, you will see a list of server locations that you can connect to. You can scroll through the list and select the one that suits your needs. For example, if you want to access a website or app that is only available in a certain country, you can choose a server location in that country. Alternatively, you can let the app choose the best server for you automatically by tapping on the smart connect button.</p>
- <h3>Step 2: Tap the connect button to start the VPN connection</h3>
- <p>After you have selected a server location, you can tap on the connect button at the bottom of the screen to start the VPN connection. You will see a green circle around the button when the connection is established. You will also see a key icon in the status bar of your device, indicating that you are using a VPN service.</p>
- <h3>Step 3: Enjoy the Internet without any restrictions or censorship</h3>
- <p>Now that you are connected to Fast Orange VPN, you can enjoy the Internet without any limitations or censorship. You can access any website or app that you want, regardless of your location or network. You can also browse the web anonymously and securely, without worrying about your online privacy or security.</p>
- <h2>Conclusion</h2>
- <p>Fast Orange APK is a great VPN app that offers fast, secure, and free proxy access to any website or app. It has an easy and simple user interface that anyone can use. It also has many server locations around the world that you can choose from. If you want to download and use Fast Orange APK on your device, you just need to enable unknown sources, download the APK file from a trusted source, install it, and launch it. Then, you can enjoy the Internet without any restrictions or censorship.</p>
- <p>I hope this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave them in the comments section below. Thank you for reading!</p>
- <h2>Frequently Asked Questions</h2>
- <ul>
- <li><b>What is Fast Orange APK?</b><br>
- Fast Orange APK is an APK file that contains the Fast Orange app, which is a VPN app developed by Orange CH.</li>
- <li><b>What are the benefits of Fast Orange APK?</b><br>
- Fast Orange APK offers fast and secure VPN service, free and unlimited proxy access, and an easy and simple user interface.</li>
- <li><b>How to download and install Fast Orange APK?</b><br>
- You need to enable unknown sources on your device, download the APK file from a trusted source, install it, and launch it.</li>
- <li><b>How to use Fast Orange APK?</b><br>
- You need to choose a server location from the list, tap the connect button to start the VPN connection, and enjoy the Internet without any restrictions or censorship.</li>
- <li><b>Is Fast Orange APK safe and reliable?</b><br>
- Fast Orange APK is safe and reliable as long as you download it from a trusted source and use it with a reputable VPN provider. However, you should always be careful about what you do online and avoid any illegal or unethical activities.</li>
- </ul></p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Final Cut Pro for Windows - Is It Possible? Heres the Answer.md DELETED
@@ -1,154 +0,0 @@
1
- <br />
2
- <h1>How to Download Final Cut Pro for Windows</h1>
3
- <p>Final Cut Pro is one of the most popular and powerful video editing software for Mac users. It offers a range of features and tools that can help you create stunning videos with ease. But what if you are a Windows user and want to use Final Cut Pro on your PC? Is it possible to download Final Cut Pro for Windows? And if so, how can you do it?</p>
4
- <h2>download final cut pro for windows</h2><br /><p><b><b>Download</b> &gt;&gt;&gt; <a href="https://jinyurl.com/2uNPqk">https://jinyurl.com/2uNPqk</a></b></p><br /><br />
5
- <p>In this article, we will answer these questions and more. We will explain what Final Cut Pro is, why it is not available for Windows, how to run it on Windows, and what are the best alternatives to Final Cut Pro for Windows. By the end of this article, you will have a clear idea of how to edit videos on your PC with or without Final Cut Pro.</p>
6
- <h2>What is Final Cut Pro?</h2>
7
- <p>Final Cut Pro is a video editing software that was developed by Apple in 1999. It is designed for professional and advanced users who need a high level of control and customization over their video projects. It supports multiple video formats, resolutions, frame rates, and codecs, as well as multi-camera editing, 360-degree video editing, VR headset playback, and advanced color grading. It also has a range of video transitions and filters, such as keying tools, mattes, and vocal de-poppers. It uses a magnetic timeline that allows non-destructive editing of clips without collisions or syncing problems. It is optimized for Mac computers with Apple silicon, especially the new Mac Studio, and can tap into superfast unified memory and the Apple Neural Engine for faster playback and rendering.</p>
8
- <h3>Features and benefits of Final Cut Pro</h3>
9
- <p>Some of the features and benefits of Final Cut Pro are:</p>
10
- <ul>
11
- <li>It has a user-friendly interface that is intuitive and customizable.</li>
12
- <li>It has a powerful media browser that lets you organize, preview, and import your media files easily.</li>
13
- <li>It has a smart conform feature that automatically crops your videos to fit different aspect ratios and social media platforms.</li>
14
- <li>It has an object tracker that uses machine learning to detect faces and objects and match their movement with titles and effects.</li>
15
- <li>It has a cinematic mode that lets you adjust focus points and depth of field on clips captured in cinematic mode on iPhone 13 or later.</li>
16
- <li>It has a duplicate detection feature that shows you any audio or video that appears more than once in your project.</li>
17
- <li>It has a proxy workflow that lets you generate proxy media in custom frame sizes from 12.5% to 100% of the original in ProRes Proxy or H.264.</li>
18
- <li>It has a range check feature that shows you which areas of an image are out of color gamut.</li>
19
- <li>It has a camera LUT feature that automatically applies look up tables (LUTs) to footage from select ARRI, Sony, Panasonic, and Canon cameras.</li>
20
- <li>It has an external monitoring feature that lets you monitor full-quality video up to 6K with Pro Display XDR or via HDMI on select Mac computers.</li>
21
- </ul>
22
- <h3>Limitations and drawbacks of Final Cut Pro</h3>
23
- <p>Some of the limitations and drawbacks of Final Cut Pro are:</p>
24
- <ul>
25
- <li>It is expensive. It costs $299.99 to buy from the App Store, which is more than most other video editing software.</li>
26
- <li>It is exclusive. It only works on Mac computers with macOS 10.15 or later, and does not support Windows or Linux operating systems.</li>
27
- <li>It is incompatible. It does not work well with some video formats, such as AVI, MKV, FLV, and WMV, and may require conversion or transcoding before editing.</li>
28
- <li>It is limited. It does not have some features that other video editing software have, such as motion tracking, 3D editing, and audio mixing.</li>
29
- <li>It is buggy. It may crash, freeze, or lag sometimes, especially when working with large or complex projects.</li>
30
- </ul>
31
- <h2>Why Final Cut Pro is not available for Windows?</h2>
32
- <p>Final Cut Pro is not available for Windows because it is proprietary software that belongs to Apple. Apple has a history of creating exclusive products and services that only work on its own devices and platforms. This is part of its business strategy to create a loyal customer base and a competitive edge over other brands. Apple also wants to maintain the quality and performance of its software by optimizing it for its own hardware and software specifications.</p>
33
- <h3>The reason behind Apple's exclusivity</h3>
34
- <p>Apple's exclusivity is based on its philosophy of creating a seamless and integrated user experience across its products and services. Apple believes that by controlling both the hardware and the software aspects of its devices, it can offer better functionality, reliability, security, and design. Apple also wants to protect its intellectual property and prevent piracy and plagiarism of its software. Apple's exclusivity also helps it generate more revenue by encouraging users to buy more of its products and services.</p>
35
- <h3>The challenges of running Final Cut Pro on Windows</h3>
36
- <p>Running Final Cut Pro on Windows is not easy or advisable. There are several challenges and risks involved in trying to do so. Some of them are:</p>
37
- <ul>
38
- <li>It is illegal. Downloading Final Cut Pro for Windows from unauthorized sources is a violation of Apple's terms and conditions and may result in legal action or penalties.</li>
39
- <li>It is unsafe. Downloading Final Cut Pro for Windows from untrusted sources may expose your computer to viruses, malware, spyware, or ransomware that can harm your system or steal your data.</li>
40
- <li>It is unstable. Running Final Cut Pro on Windows may cause compatibility issues, performance problems, errors, crashes, or data loss due to the differences in the operating systems, drivers, codecs, and hardware specifications.</li>
41
- <li>It is unsupported. Running Final Cut Pro on Windows may not receive any updates, patches, fixes, or customer service from Apple or any other official source.</li>
42
- </ul>
43
- <h2>How to run Final Cut Pro on Windows?</h2>
44
- <p>Despite the challenges and risks mentioned above, some people still want to run Final Cut Pro on Windows for various reasons. If you are one of them, you should know that there are two possible methods of installing Final Cut Pro on Windows: using a virtual machine or using a hackintosh.</p>
85
- <h3>The possible methods of installing Final Cut Pro on Windows</h3>
86
- <p>A virtual machine is software that runs a different operating system inside your current operating system. For example, you can use a virtual machine to run macOS on your Windows PC. A hackintosh is a computer that runs macOS on non-Apple hardware. For example, you can install macOS on a custom-built PC.</p>
87
- <p>To use a virtual machine to run Final Cut Pro on Windows, you will need to download and install virtual machine software such as VMware Workstation Player or VirtualBox. Then you will need to download and install macOS on the virtual machine. Finally, you will need to download and install Final Cut Pro on the macOS virtual machine.</p>
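- <p>For a rough idea of what the VirtualBox route involves, the VM itself can be created from the command line with VirtualBox's <code>VBoxManage</code> tool. The commands below are only a sketch: the VM name, memory, CPU count, and disk size are placeholder values, and you would still need a macOS installer image plus whatever host-specific tweaks your hardware requires.</p>
- <pre><code>VBoxManage createvm --name "macos-vm" --ostype MacOS_64 --register
- VBoxManage modifyvm "macos-vm" --memory 8192 --cpus 4
- VBoxManage createmedium disk --filename macos-vm.vdi --size 61440</code></pre>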
88
- <p>To use a hackintosh to run Final Cut Pro on Windows, you will need to have a compatible PC that meets the minimum requirements for running macOS. Then you will need to download and create a bootable USB drive with macOS installer. Finally, you will need to boot from the USB drive and install macOS on your PC.</p>
89
- <h3>The risks and disadvantages of using Final Cut Pro on Windows</h3>
90
- <p>Using either method to run Final Cut Pro on Windows has some risks and disadvantages that you should be aware of before trying them. Some of them are:</p>
91
- <ul>
92
- <li>You may violate Apple's terms and conditions and face legal consequences.</li>
93
- <li>You may damage your computer or lose your data due to errors or crashes.</li>
94
- <li>You may compromise your security or privacy due to viruses or malware.</li>
95
- <li>You may experience poor performance or quality due to compatibility issues or lack of optimization.</li>
96
- <li>You may not be able to access some features or functions due to limitations or restrictions.</li>
97
- <li>You may not receive any updates or support from Apple or any other official source.</li>
98
- </ul>
99
- <h2>What are the best alternatives to Final Cut Pro for Windows?</h2>
100
- <p>If you are looking for a video editing software that can match or surpass Final Cut Pro in terms of features, performance, and quality, but also works on Windows, you have plenty of options to choose from. There are many video editing software for Windows that cater to different levels of skills, budgets, and needs. Here are some of the criteria for choosing a good video editing software:</p>
101
- <ul>
102
- <li>It should have a user-friendly and customizable interface that suits your workflow and preferences.</li>
103
- <li>It should support a wide range of video formats, resolutions, frame rates, and codecs, as well as 4K video, HDR video, and VR video.</li>
104
- <li>It should offer a variety of video transitions and filters, as well as advanced tools such as multi-camera editing, motion tracking, color grading, and audio editing.</li>
105
- <li>It should have a fast and stable performance that can handle large or complex projects without lagging or crashing.</li>
106
- <li>It should have a reasonable price that fits your budget and offers good value for money.</li>
107
- </ul>
108
- <h3>The top three alternatives to Final Cut Pro for Windows</h3>
109
- <p>Based on the criteria above, we have selected the top three alternatives to Final Cut Pro for Windows that we think are worth considering. They are:</p>
110
- <h4>Adobe Premiere Pro</h4>
111
- <p>Adobe Premiere Pro is our top pick for the best video editing software for Windows. It is the industry-standard tool that is used by many professional videographers and editors around the world. It has all the features and tools you need to create stunning videos with ease. It is compatible with other Adobe products such as Photoshop and After Effects, which makes it ideal for cross-platform collaboration and integration. It also has a cloud-based service called Premiere Rush that lets you edit videos on your mobile devices and sync them with your desktop. It costs $20.99 per month or $239.88 per year as a standalone app, or $52.99 per month or $599.88 per year as part of the Adobe Creative Cloud suite.</p>
112
- <h4>CyberLink PowerDirector</h4>
113
- <p>CyberLink PowerDirector is another excellent video editing software for Windows that offers tons of tools and features for both beginners and experts. It has a user-friendly, customizable interface and is known for its fast rendering engine. It supports multi-camera editing, 360-degree video editing, motion tracking, and chroma-key compositing, and it ships with a large library of effects, transitions, and templates. It also handles a wide range of video formats and resolutions, including 4K. It costs $69.99 for the lifetime license or $51.99 per year for the subscription.</p>
114
- <h4>DaVinci Resolve</h4>
115
- <p>DaVinci Resolve is a free video editing software for Windows that offers pro-level tools and features for advanced users. It has a modular interface that consists of different pages for different tasks, such as media management, editing, color grading, audio mixing, and delivery. It supports multiple video formats, resolutions, frame rates, and codecs, as well as 4K video, HDR video, and VR video. It offers a variety of video transitions and filters, as well as advanced tools such as multi-camera editing, motion tracking, keying, stabilization, noise reduction, and 3D editing. It has a powerful color grading system that lets you adjust color balance, contrast, saturation, hue, luminance, and more with precision and control. It also includes a professional audio mixing system that lets you edit soundtracks, add effects, mix channels, automate levels, and more with high-quality sound processing. The free version of DaVinci Resolve has most of the features you need to create amazing videos, but if you want more advanced features such as collaboration tools, neural engine effects, facial recognition tracking, lens distortion correction, HDR grading tools, and more, you can upgrade to the Studio version for $299.</p>
116
- <h2>Conclusion</h2>
117
- <p>Final Cut Pro is a great video editing software for Mac users, but it is not available for Windows users. If you want to use Final Cut Pro on Windows, you can try using a virtual machine or a hackintosh, but you will face many challenges and risks. A better option is to use one of the best alternatives to Final Cut Pro for Windows, such as Adobe Premiere Pro, CyberLink PowerDirector, or DaVinci Resolve. These programs offer features, performance, and quality similar to or better than Final Cut Pro's, and they run natively on Windows. They also come at different price points and plans that suit different budgets and needs. You can download and try them for free and see which one works best for you.</p>
118
- <h3>Call to action and recommendations</h3>
119
- <p>If you are ready to start editing videos on your Windows PC, we recommend you to check out the following links:</p>
120
- <ul>
121
- <li>[Download Adobe Premiere Pro]</li>
122
- <li>[Download CyberLink PowerDirector]</li>
123
- <li>[Download DaVinci Resolve]</li>
124
- </ul>
125
- <p>We hope this article has helped you learn how to download Final Cut Pro for Windows and what are the best alternatives to Final Cut Pro for Windows. If you have any questions or feedback, please leave a comment below. And if you liked this article, please share it with your friends and colleagues who might find it useful. Thank you for reading!</p>
126
- <h2>FAQs</h2>
127
- <p>Here are some of the frequently asked questions about Final Cut Pro and Windows:</p>
128
- <h4>Can I get Final Cut Pro for free?</h4>
129
- <p>No, Final Cut Pro is not free. It costs $299.99 to buy from the App Store. However, you can get a free 90-day trial of Final Cut Pro from Apple's website. You can use this trial to test the software and see if it meets your needs.</p>
130
- <h4>Is Final Cut Pro better than Adobe Premiere Pro?</h4>
131
- <p>Final Cut Pro and Adobe Premiere Pro are both excellent video editing software that have their own strengths and weaknesses. Some of the factors that may influence your choice are:</p>
132
- <ul>
133
- <li>Your operating system. Final Cut Pro only works on Mac, while Adobe Premiere Pro works on both Mac and Windows.</li>
134
- <li>Your budget. Final Cut Pro has a one-time payment of $299.99, while Adobe Premiere Pro has a monthly or yearly subscription of $20.99 or $239.88 respectively.</li>
135
- <li>Your workflow. Final Cut Pro has a magnetic timeline that allows non-destructive editing of clips without collisions or syncing problems, while Adobe Premiere Pro has a traditional timeline that requires more manual adjustments and trimming of clips.</li>
136
- <li>Your integration. Final Cut Pro is optimized for Mac computers with Apple silicon and can tap into superfast unified memory and the Apple Neural Engine, while Adobe Premiere Pro is compatible with other Adobe products such as Photoshop and After Effects and can offer cross-platform collaboration and integration.</li>
137
- </ul>
138
- <p>The best way to decide which one is better for you is to try them both and compare their features, performance, and quality.</p>
139
- <h4>How long does it take to learn Final Cut Pro?</h4>
140
- <p>The time it takes to learn Final Cut Pro depends on your previous experience with video editing software, your learning style, and your goals. Generally speaking, Final Cut Pro has a user-friendly interface that is intuitive and customizable, which makes it easy to learn for beginners. However, it also has many advanced features and tools that require more practice and skill to master. A good way to learn Final Cut Pro is to follow online tutorials, courses, or books that teach you the basics and the best practices of video editing with Final Cut Pro. You can also join online communities or forums where you can ask questions, get feedback, and share tips with other users.</p>
141
- <h4>What are the system requirements for Final Cut Pro?</h4>
142
- <p>The minimum system requirements for running Final Cut Pro are:</p>
143
- <ul>
144
- <li>A Mac computer with macOS 10.15 or later.</li>
145
- <li>A 64-bit processor (Intel Core i5 or later, or Apple silicon).</li>
146
- <li>4 GB of RAM (8 GB recommended for 4K editing, 3D titles, and 360-degree video editing).</li>
147
- <li>Metal-capable graphics card (1 GB VRAM recommended for 4K editing, 3D titles, and 360-degree video editing).</li>
148
- <li>3.8 GB of disk space for the application and 38 GB of disk space for optional content.</li>
149
- <li>An internet connection for downloading additional content and updates.</li>
150
- </ul>
151
- <h4>Can I use Final Cut Pro on my iPad or iPhone?</h4>
152
- <p>No, Final Cut Pro is not available for iPad or iPhone. However, you can use iMovie, which is a free video editing app that is similar to Final Cut Pro but simpler and more streamlined. iMovie lets you edit videos on your iPad or iPhone with ease and fun. You can add titles, transitions, filters, music, sound effects, and more to your videos. You can also use the green-screen effect, the picture-in-picture effect, the split-screen effect, and the cinematic mode to create amazing videos. You can also export your videos to different formats and resolutions, and share them with your friends and family via social media, email, or AirDrop. You can download iMovie from the App Store.</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/tailwind.config.js DELETED
@@ -1,48 +0,0 @@
1
- /** @type {import('tailwindcss').Config} */
2
- module.exports = {
3
- content: [
4
- './src/pages/**/*.{js,ts,jsx,tsx,mdx}',
5
- './src/components/**/*.{js,ts,jsx,tsx,mdx}',
6
- './src/app/**/*.{js,ts,jsx,tsx,mdx}',
7
- './src/ui/**/*.{js,ts,jsx,tsx,mdx}',
8
- ],
9
- "darkMode": "class",
10
- theme: {
11
- extend: {
12
- colors: {
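- // These entries read CSS custom properties so Tailwind's <alpha-value> opacity
- // modifiers keep working; each variable is assumed to hold space-separated RGB
- // channels, e.g. --color-primary-blue: 59 130 246 (an illustrative value).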
13
- 'primary-blue': 'rgb(var(--color-primary-blue) / <alpha-value>)',
14
- secondary: 'rgb(var(--color-secondary) / <alpha-value>)',
15
- 'primary-background': 'rgb(var(--primary-background) / <alpha-value>)',
16
- 'primary-text': 'rgb(var(--primary-text) / <alpha-value>)',
17
- 'secondary-text': 'rgb(var(--secondary-text) / <alpha-value>)',
18
- 'light-text': 'rgb(var(--light-text) / <alpha-value>)',
19
- 'primary-border': 'rgb(var(--primary-border) / <alpha-value>)',
20
- },
21
- keyframes: {
22
- slideDownAndFade: {
23
- from: { opacity: 0, transform: 'translateY(-2px)' },
24
- to: { opacity: 1, transform: 'translateY(0)' },
25
- },
26
- slideLeftAndFade: {
27
- from: { opacity: 0, transform: 'translateX(2px)' },
28
- to: { opacity: 1, transform: 'translateX(0)' },
29
- },
30
- slideUpAndFade: {
31
- from: { opacity: 0, transform: 'translateY(2px)' },
32
- to: { opacity: 1, transform: 'translateY(0)' },
33
- },
34
- slideRightAndFade: {
35
- from: { opacity: 0, transform: 'translateX(2px)' },
36
- to: { opacity: 1, transform: 'translateX(0)' },
37
- },
38
- },
39
- animation: {
40
- slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
41
- slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
42
- slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
43
- slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
44
- },
45
- },
46
- },
47
- plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')],
48
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AI-Dashboards/Graph.NLP.Sentence.Similarity.Heatmap.KMeansCluster/app.py DELETED
@@ -1,77 +0,0 @@
1
- import streamlit as st
2
- import nltk
3
- from transformers import pipeline
4
- from sentence_transformers import SentenceTransformer
5
- from scipy.spatial.distance import cosine
6
- import numpy as np
7
- import seaborn as sns
8
- import matplotlib.pyplot as plt
9
- from sklearn.cluster import KMeans
10
- import tensorflow as tf
11
- import tensorflow_hub as hub
12
-
13
-
14
- def cluster_examples(messages, embed, nc=3):
15
- km = KMeans(
16
- n_clusters=nc, init='random',
17
- n_init=10, max_iter=300,
18
- tol=1e-04, random_state=0
19
- )
20
- labels = km.fit_predict(embed)  # one cluster label per message
21
- for n in range(nc):
22
- idxs = [i for i in range(len(labels)) if labels[i] == n]
23
- ms = [messages[i] for i in idxs]
24
- st.markdown ("CLUSTER : %d"%n)
25
- for m in ms:
26
- st.markdown(m)
27
-
28
-
29
- def plot_heatmap(labels, heatmap, rotation=90):
30
- sns.set(font_scale=1.2)
31
- fig, ax = plt.subplots()
32
- g = sns.heatmap(
33
- heatmap,
34
- xticklabels=labels,
35
- yticklabels=labels,
36
- vmin=-1,
37
- vmax=1,
38
- cmap="coolwarm")
39
- g.set_xticklabels(labels, rotation=rotation)
40
- g.set_title("Textual Similarity")
41
-
42
- st.pyplot(fig)
43
- #plt.show()
44
-
45
- #st.header("Sentence Similarity Demo")
46
-
47
- # Streamlit text boxes
48
- text = st.text_area('Enter sentences:', value="Self confidence in outcomes helps us win and to make us successful.\nShe has a seriously impressive intellect and mind.\nStimulating and deep conversation helps us develop and grow.\nFrom basic quantum particles we get aerodynamics, friction, surface tension, weather, electromagnetism.\nIf she actively engages and comments positively, her anger disappears adapting into win-win's favor.\nI love interesting topics of conversation and the understanding and exploration of thoughts.\nThere is the ability to manipulate things the way you want in your mind to go how you want when you are self confident, that we don’t understand yet.")
49
-
50
- nc = st.slider('Select a number of clusters:', min_value=1, max_value=15, value=3)
51
-
52
- model_type = st.radio("Choose model:", ('Sentence Transformer', 'Universal Sentence Encoder'), index=0)
53
-
54
- # Model setup
55
- if model_type == "Sentence Transformer":
56
- model = SentenceTransformer('paraphrase-distilroberta-base-v1')
57
- elif model_type == "Universal Sentence Encoder":
58
- model_url = "https://tfhub.dev/google/universal-sentence-encoder-large/5"
59
- model = hub.load(model_url)
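- # Note: these loaders run on every Streamlit rerun; caching them (for example
- # with st.cache_resource in recent Streamlit releases) would avoid re-downloading
- # the models each time the script re-executes.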
60
-
61
- nltk.download('punkt')
62
-
63
- # Run model
64
- if text:
65
- sentences = nltk.tokenize.sent_tokenize(text)
66
- if model_type == "Sentence Transformer":
67
- embed = model.encode(sentences)
68
- elif model_type == "Universal Sentence Encoder":
69
- embed = model(sentences).numpy()
70
- sim = np.zeros([len(embed), len(embed)])
71
- for i,em in enumerate(embed):
72
- for j,ea in enumerate(embed):
73
- sim[i][j] = 1.0-cosine(em,ea)
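- # The nested loops above make O(n^2) scipy calls; an equivalent vectorized sketch:
- # unit = embed / np.linalg.norm(embed, axis=1, keepdims=True); sim = unit @ unit.T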
74
- st.subheader("Similarity Heatmap")
75
- plot_heatmap(sentences, sim)
76
- st.subheader("Results from K-Means Clustering")
77
- cluster_examples(sentences, embed, nc)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/text_norm.py DELETED
@@ -1,790 +0,0 @@
1
- # coding=utf-8
2
- # Authors:
3
- # 2019.5 Zhiyang Zhou (https://github.com/Joee1995/chn_text_norm.git)
4
- # 2019.9 Jiayu DU
5
- #
6
- # requirements:
7
- # - python 3.X
8
- # notes: python 2.X WILL fail or produce misleading results
9
-
10
- import sys, os, argparse, codecs, string, re
11
-
12
- # ================================================================================ #
13
- # basic constant
14
- # ================================================================================ #
15
- CHINESE_DIGIS = u'零一二三四五六七八九'
16
- BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖'
17
- BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖'
18
- SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万'
19
- SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬'
20
- LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载'
21
- LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載'
22
- SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万'
23
- SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬'
24
-
25
- ZERO_ALT = u'〇'
26
- ONE_ALT = u'幺'
27
- TWO_ALTS = [u'两', u'兩']
28
-
29
- POSITIVE = [u'正', u'正']
30
- NEGATIVE = [u'负', u'負']
31
- POINT = [u'点', u'點']
32
- # PLUS = [u'加', u'加']
33
- # SIL = [u'杠', u'槓']
34
-
35
- # Types of Chinese numbering systems
36
- NUMBERING_TYPES = ['low', 'mid', 'high']
37
-
38
- CURRENCY_NAMES = '(人民币|美元|日元|英镑|欧元|马克|法郎|加拿大元|澳元|港币|先令|芬兰马克|爱尔兰镑|' \
39
- '里拉|荷兰盾|埃斯库多|比塞塔|印尼盾|林吉特|新西兰元|比索|卢布|新加坡元|韩元|泰铢)'
40
- CURRENCY_UNITS = '((亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)'
41
- COM_QUANTIFIERS = '(匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|' \
42
- '砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|' \
43
- '针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|' \
44
- '毫|厘|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|' \
45
- '盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|' \
46
- '纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块)'
47
-
48
- # punctuation information are based on Zhon project (https://github.com/tsroten/zhon.git)
49
- CHINESE_PUNC_STOP = '!?。。'
50
- CHINESE_PUNC_NON_STOP = '"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏'
51
- CHINESE_PUNC_LIST = CHINESE_PUNC_STOP + CHINESE_PUNC_NON_STOP
52
-
53
-
54
- # ================================================================================ #
55
- # basic class
56
- # ================================================================================ #
57
- class ChineseChar(object):
58
- """
59
- A Chinese character.
60
- Each character has a simplified and a traditional form,
61
- e.g. simplified = '负', traditional = '負'.
62
- Conversion can target either the simplified or the traditional form.
63
- """
64
-
65
- def __init__(self, simplified, traditional):
66
- self.simplified = simplified
67
- self.traditional = traditional
68
- # self.__repr__ = self.__str__
69
-
70
- def __str__(self):
71
- return self.simplified or self.traditional or None
72
-
73
- def __repr__(self):
74
- return self.__str__()
75
-
76
-
77
- class ChineseNumberUnit(ChineseChar):
78
- """
79
- A Chinese number-unit (place value) character.
80
- Besides the simplified and traditional forms, each character also has an uppercase (financial) variant,
81
- e.g. '陆' and '陸'.
82
- """
83
-
84
- def __init__(self, power, simplified, traditional, big_s, big_t):
85
- super(ChineseNumberUnit, self).__init__(simplified, traditional)
86
- self.power = power
87
- self.big_s = big_s
88
- self.big_t = big_t
89
-
90
- def __str__(self):
91
- return '10^{}'.format(self.power)
92
-
93
- @classmethod
94
- def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False):
95
-
96
- if small_unit:
97
- return ChineseNumberUnit(power=index + 1,
98
- simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1])
99
- elif numbering_type == NUMBERING_TYPES[0]:
100
- return ChineseNumberUnit(power=index + 8,
101
- simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
102
- elif numbering_type == NUMBERING_TYPES[1]:
103
- return ChineseNumberUnit(power=(index + 2) * 4,
104
- simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
105
- elif numbering_type == NUMBERING_TYPES[2]:
106
- return ChineseNumberUnit(power=pow(2, index + 3),
107
- simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
108
- else:
109
- raise ValueError(
110
- 'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type))
111
-
112
-
113
- class ChineseNumberDigit(ChineseChar):
114
- """
115
- A Chinese digit character.
116
- """
117
-
118
- def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None):
119
- super(ChineseNumberDigit, self).__init__(simplified, traditional)
120
- self.value = value
121
- self.big_s = big_s
122
- self.big_t = big_t
123
- self.alt_s = alt_s
124
- self.alt_t = alt_t
125
-
126
- def __str__(self):
127
- return str(self.value)
128
-
129
- @classmethod
130
- def create(cls, i, v):
131
- return ChineseNumberDigit(i, v[0], v[1], v[2], v[3])
132
-
133
-
134
- class ChineseMath(ChineseChar):
135
- """
136
- A Chinese math-symbol character.
137
- """
138
-
139
- def __init__(self, simplified, traditional, symbol, expression=None):
140
- super(ChineseMath, self).__init__(simplified, traditional)
141
- self.symbol = symbol
142
- self.expression = expression
143
- self.big_s = simplified
144
- self.big_t = traditional
145
-
146
-
147
- CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigit, ChineseMath
148
-
149
-
150
- class NumberSystem(object):
151
- """
152
- The Chinese numbering system.
153
- """
154
- pass
155
-
156
-
157
- class MathSymbol(object):
158
- """
159
- Math symbols (traditional/simplified) used in the Chinese numbering system, e.g.
160
- positive = ['正', '正']
161
- negative = ['负', '負']
162
- point = ['点', '點']
163
- """
164
-
165
- def __init__(self, positive, negative, point):
166
- self.positive = positive
167
- self.negative = negative
168
- self.point = point
169
-
170
- def __iter__(self):
171
- for v in self.__dict__.values():
172
- yield v
173
-
174
-
175
- # class OtherSymbol(object):
176
- # """
177
- # Other symbols
178
- # """
179
- #
180
- # def __init__(self, sil):
181
- # self.sil = sil
182
- #
183
- # def __iter__(self):
184
- # for v in self.__dict__.values():
185
- # yield v
186
-
187
-
188
- # ================================================================================ #
189
- # basic utils
190
- # ================================================================================ #
191
- def create_system(numbering_type=NUMBERING_TYPES[1]):
192
- """
193
- Create and return the numbering system for the given type; defaults to 'mid'.
194
- NUMBERING_TYPES = ['low', 'mid', 'high']: types of Chinese numbering systems.
195
- low: '兆' = '亿' * '十' = $10^{9}$, '京' = '兆' * '十', etc.
196
- mid: '兆' = '亿' * '万' = $10^{12}$, '京' = '兆' * '万', etc.
197
- high: '兆' = '亿' * '亿' = $10^{16}$, '京' = '兆' * '兆', etc.
198
- Returns the corresponding numbering system.
199
- """
200
-
201
- # chinese number units of '亿' and larger
202
- all_larger_units = zip(
203
- LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL)
204
- larger_units = [CNU.create(i, v, numbering_type, False)
205
- for i, v in enumerate(all_larger_units)]
206
- # chinese number units of '十, 百, 千, 万'
207
- all_smaller_units = zip(
208
- SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL)
209
- smaller_units = [CNU.create(i, v, small_unit=True)
210
- for i, v in enumerate(all_smaller_units)]
211
- # digits
212
- chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS,
213
- BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL)
214
- digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)]
215
- digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT
216
- digits[1].alt_s, digits[1].alt_t = ONE_ALT, ONE_ALT
217
- digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1]
218
-
219
- # symbols
220
- positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x)
221
- negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x)
222
- point_cn = CM(POINT[0], POINT[1], '.', lambda x,
223
- y: float(str(x) + '.' + str(y)))
224
- # sil_cn = CM(SIL[0], SIL[1], '-', lambda x, y: float(str(x) + '-' + str(y)))
225
- system = NumberSystem()
226
- system.units = smaller_units + larger_units
227
- system.digits = digits
228
- system.math = MathSymbol(positive_cn, negative_cn, point_cn)
229
- # system.symbols = OtherSymbol(sil_cn)
230
- return system
231
-
232
-
233
- def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]):
234
- def get_symbol(char, system):
235
- for u in system.units:
236
- if char in [u.traditional, u.simplified, u.big_s, u.big_t]:
237
- return u
238
- for d in system.digits:
239
- if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]:
240
- return d
241
- for m in system.math:
242
- if char in [m.traditional, m.simplified]:
243
- return m
244
-
245
- def string2symbols(chinese_string, system):
246
- int_string, dec_string = chinese_string, ''
247
- for p in [system.math.point.simplified, system.math.point.traditional]:
248
- if p in chinese_string:
249
- int_string, dec_string = chinese_string.split(p)
250
- break
251
- return [get_symbol(c, system) for c in int_string], \
252
- [get_symbol(c, system) for c in dec_string]
253
-
254
- def correct_symbols(integer_symbols, system):
255
- """
256
- Expand shorthand forms, e.g. 一百八 to 一百八十;
257
- normalize stacked units, e.g. 一亿一千三百万 is treated as 一亿 一千万 三百万.
258
- """
259
-
260
- if integer_symbols and isinstance(integer_symbols[0], CNU):
261
- if integer_symbols[0].power == 1:
262
- integer_symbols = [system.digits[1]] + integer_symbols
263
-
264
- if len(integer_symbols) > 1:
265
- if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU):
266
- integer_symbols.append(
267
- CNU(integer_symbols[-2].power - 1, None, None, None, None))
268
-
269
- result = []
270
- unit_count = 0
271
- for s in integer_symbols:
272
- if isinstance(s, CND):
273
- result.append(s)
274
- unit_count = 0
275
- elif isinstance(s, CNU):
276
- current_unit = CNU(s.power, None, None, None, None)
277
- unit_count += 1
278
-
279
- if unit_count == 1:
280
- result.append(current_unit)
281
- elif unit_count > 1:
282
- for i in range(len(result)):
283
- if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power:
284
- result[-i - 1] = CNU(result[-i - 1].power +
285
- current_unit.power, None, None, None, None)
286
- return result
287
-
288
- def compute_value(integer_symbols):
289
- """
290
- Compute the value.
291
- When the current unit is larger than the previous one, every value accumulated so far is multiplied by the current unit,
292
- e.g. '两千万' = 2000 * 10000, not 2000 + 10000.
293
- """
294
- value = [0]
295
- last_power = 0
296
- for s in integer_symbols:
297
- if isinstance(s, CND):
298
- value[-1] = s.value
299
- elif isinstance(s, CNU):
300
- value[-1] *= pow(10, s.power)
301
- if s.power > last_power:
302
- value[:-1] = list(map(lambda v: v *
303
- pow(10, s.power), value[:-1]))
304
- last_power = s.power
305
- value.append(0)
306
- return sum(value)
307
-
308
- system = create_system(numbering_type)
309
- int_part, dec_part = string2symbols(chinese_string, system)
310
- int_part = correct_symbols(int_part, system)
311
- int_str = str(compute_value(int_part))
312
- dec_str = ''.join([str(d.value) for d in dec_part])
313
- if dec_part:
314
- return '{0}.{1}'.format(int_str, dec_str)
315
- else:
316
- return int_str
317
-
318
-
319
- def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False,
320
- traditional=False, alt_zero=False, alt_one=False, alt_two=True,
321
- use_zeros=True, use_units=True):
322
- def get_value(value_string, use_zeros=True):
323
-
324
- striped_string = value_string.lstrip('0')
325
-
326
- # record nothing if all zeros
327
- if not striped_string:
328
- return []
329
-
330
- # record a single digit
331
- elif len(striped_string) == 1:
332
- if use_zeros and len(value_string) != len(striped_string):
333
- return [system.digits[0], system.digits[int(striped_string)]]
334
- else:
335
- return [system.digits[int(striped_string)]]
336
-
337
- # recursively record multiple digits
338
- else:
339
- result_unit = next(u for u in reversed(
340
- system.units) if u.power < len(striped_string))
341
- result_string = value_string[:-result_unit.power]
342
- return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:])
343
-
344
- system = create_system(numbering_type)
345
-
346
- int_dec = number_string.split('.')
347
- if len(int_dec) == 1:
348
- int_string = int_dec[0]
349
- dec_string = ""
350
- elif len(int_dec) == 2:
351
- int_string = int_dec[0]
352
- dec_string = int_dec[1]
353
- else:
354
- raise ValueError(
355
- "invalid input num string with more than one dot: {}".format(number_string))
356
-
357
- if use_units and len(int_string) > 1:
358
- result_symbols = get_value(int_string)
359
- else:
360
- result_symbols = [system.digits[int(c)] for c in int_string]
361
- dec_symbols = [system.digits[int(c)] for c in dec_string]
362
- if dec_string:
363
- result_symbols += [system.math.point] + dec_symbols
364
-
365
- if alt_two:
366
- liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t,
367
- system.digits[2].big_s, system.digits[2].big_t)
368
- for i, v in enumerate(result_symbols):
369
- if isinstance(v, CND) and v.value == 2:
370
- next_symbol = result_symbols[i +
371
- 1] if i < len(result_symbols) - 1 else None
372
- previous_symbol = result_symbols[i - 1] if i > 0 else None
373
- if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))):
374
- if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)):
375
- result_symbols[i] = liang
376
-
377
- # if big is True, '两' will not be used and `alt_two` has no impact on output
378
- if big:
379
- attr_name = 'big_'
380
- if traditional:
381
- attr_name += 't'
382
- else:
383
- attr_name += 's'
384
- else:
385
- if traditional:
386
- attr_name = 'traditional'
387
- else:
388
- attr_name = 'simplified'
389
-
390
- result = ''.join([getattr(s, attr_name) for s in result_symbols])
391
-
392
- # if not use_zeros:
393
- # result = result.strip(getattr(system.digits[0], attr_name))
394
-
395
- if alt_zero:
396
- result = result.replace(
397
- getattr(system.digits[0], attr_name), system.digits[0].alt_s)
398
-
399
- if alt_one:
400
- result = result.replace(
401
- getattr(system.digits[1], attr_name), system.digits[1].alt_s)
402
-
403
- for i, p in enumerate(POINT):
404
- if result.startswith(p):
405
- return CHINESE_DIGIS[0] + result
406
-
407
- # ^10, 11, .., 19
408
- if len(result) >= 2 and result[1] in [SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0],
409
- SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \
410
- result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]:
411
- result = result[1:]
412
-
413
- return result
414
-
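- # Quick sanity check (illustrative): chn2num('一百八十') == '180' and
- # num2chn('180') == '一百八十'.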
415
-
416
- # ================================================================================ #
417
- # different types of rewriters
418
- # ================================================================================ #
419
- class Cardinal:
420
- """
421
- CARDINAL class: plain cardinal numbers.
422
- """
423
-
424
- def __init__(self, cardinal=None, chntext=None):
425
- self.cardinal = cardinal
426
- self.chntext = chntext
427
-
428
- def chntext2cardinal(self):
429
- return chn2num(self.chntext)
430
-
431
- def cardinal2chntext(self):
432
- return num2chn(self.cardinal)
433
-
434
-
435
- class Digit:
436
- """
437
- DIGIT class: digit-by-digit readings.
438
- """
439
-
440
- def __init__(self, digit=None, chntext=None):
441
- self.digit = digit
442
- self.chntext = chntext
443
-
444
- # def chntext2digit(self):
445
- # return chn2num(self.chntext)
446
-
447
- def digit2chntext(self):
448
- return num2chn(self.digit, alt_two=False, use_units=False)
449
-
450
-
451
- class TelePhone:
452
- """
453
- TELEPHONE class: phone numbers.
454
- """
455
-
456
- def __init__(self, telephone=None, raw_chntext=None, chntext=None):
457
- self.telephone = telephone
458
- self.raw_chntext = raw_chntext
459
- self.chntext = chntext
460
-
461
- # def chntext2telephone(self):
462
- # sil_parts = self.raw_chntext.split('<SIL>')
463
- # self.telephone = '-'.join([
464
- # str(chn2num(p)) for p in sil_parts
465
- # ])
466
- # return self.telephone
467
-
468
- def telephone2chntext(self, fixed=False):
469
-
470
- if fixed:
471
- sil_parts = self.telephone.split('-')
472
- self.raw_chntext = '<SIL>'.join([
473
- num2chn(part, alt_two=False, use_units=False) for part in sil_parts
474
- ])
475
- self.chntext = self.raw_chntext.replace('<SIL>', '')
476
- else:
477
- sp_parts = self.telephone.strip('+').split()
478
- self.raw_chntext = '<SP>'.join([
479
- num2chn(part, alt_two=False, use_units=False) for part in sp_parts
480
- ])
481
- self.chntext = self.raw_chntext.replace('<SP>', '')
482
- return self.chntext
483
-
484
-
485
- class Fraction:
486
- """
487
- FRACTION class: fractions.
488
- """
489
-
490
- def __init__(self, fraction=None, chntext=None):
491
- self.fraction = fraction
492
- self.chntext = chntext
493
-
494
- def chntext2fraction(self):
495
- denominator, numerator = self.chntext.split('分之')
496
- return chn2num(numerator) + '/' + chn2num(denominator)
497
-
498
- def fraction2chntext(self):
499
- numerator, denominator = self.fraction.split('/')
500
- return num2chn(denominator) + '分之' + num2chn(numerator)
501
-
502
-
503
- class Date:
504
- """
505
- DATE class: dates.
506
- """
507
-
508
- def __init__(self, date=None, chntext=None):
509
- self.date = date
510
- self.chntext = chntext
511
-
512
- # def chntext2date(self):
513
- # chntext = self.chntext
514
- # try:
515
- # year, other = chntext.strip().split('年', maxsplit=1)
516
- # year = Digit(chntext=year).digit2chntext() + '年'
517
- # except ValueError:
518
- # other = chntext
519
- # year = ''
520
- # if other:
521
- # try:
522
- # month, day = other.strip().split('月', maxsplit=1)
523
- # month = Cardinal(chntext=month).chntext2cardinal() + '月'
524
- # except ValueError:
525
- # day = chntext
526
- # month = ''
527
- # if day:
528
- # day = Cardinal(chntext=day[:-1]).chntext2cardinal() + day[-1]
529
- # else:
530
- # month = ''
531
- # day = ''
532
- # date = year + month + day
533
- # self.date = date
534
- # return self.date
535
-
536
- def date2chntext(self):
537
- date = self.date
538
- try:
539
- year, other = date.strip().split('年', 1)
540
- year = Digit(digit=year).digit2chntext() + '年'
541
- except ValueError:
542
- other = date
543
- year = ''
544
- if other:
545
- try:
546
- month, day = other.strip().split('月', 1)
547
- month = Cardinal(cardinal=month).cardinal2chntext() + '月'
548
- except ValueError:
549
- day = date
550
- month = ''
551
- if day:
552
- day = Cardinal(cardinal=day[:-1]).cardinal2chntext() + day[-1]
553
- else:
554
- month = ''
555
- day = ''
556
- chntext = year + month + day
557
- self.chntext = chntext
558
- return self.chntext
559
-
560
-
561
- class Money:
562
- """
563
- MONEY class: money amounts.
564
- """
565
-
566
- def __init__(self, money=None, chntext=None):
567
- self.money = money
568
- self.chntext = chntext
569
-
570
- # def chntext2money(self):
571
- # return self.money
572
-
573
- def money2chntext(self):
574
- money = self.money
575
- pattern = re.compile(r'(\d+(\.\d+)?)')
576
- matchers = pattern.findall(money)
577
- if matchers:
578
- for matcher in matchers:
579
- money = money.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext())
580
- self.chntext = money
581
- return self.chntext
582
-
583
-
584
- class Percentage:
585
- """
586
- PERCENTAGE class: percentages.
587
- """
588
-
589
- def __init__(self, percentage=None, chntext=None):
590
- self.percentage = percentage
591
- self.chntext = chntext
592
-
593
- def chntext2percentage(self):
594
- return chn2num(self.chntext.strip().strip('百分之')) + '%'
595
-
596
- def percentage2chntext(self):
597
- return '百分之' + num2chn(self.percentage.strip().strip('%'))
598
-
599
-
600
- # ================================================================================ #
601
- # NSW Normalizer
602
- # ================================================================================ #
603
- class NSWNormalizer:
604
- def __init__(self, raw_text):
605
- self.raw_text = '^' + raw_text + '$'
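- # The '^'/'$' sentinels guarantee a non-digit character on both sides of the
- # text, which the \D-anchored patterns in normalize() rely on; they are
- # stripped again at the end of normalize().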
606
- self.norm_text = ''
607
-
608
- def _particular(self):
609
- text = self.norm_text
610
- pattern = re.compile(r"(([a-zA-Z]+)二([a-zA-Z]+))")
611
- matchers = pattern.findall(text)
612
- if matchers:
613
- # print('particular')
614
- for matcher in matchers:
615
- text = text.replace(matcher[0], matcher[1] + '2' + matcher[2], 1)
616
- self.norm_text = text
617
- return self.norm_text
618
-
619
- def normalize(self, remove_punc=True):
620
- text = self.raw_text
621
-
622
- # Normalize dates
623
- pattern = re.compile(r"\D+((([089]\d|(19|20)\d{2})年)?(\d{1,2}月(\d{1,2}[日号])?)?)")
624
- matchers = pattern.findall(text)
625
- if matchers:
626
- # print('date')
627
- for matcher in matchers:
628
- text = text.replace(matcher[0], Date(date=matcher[0]).date2chntext(), 1)
629
-
630
- # Normalize money amounts
631
- pattern = re.compile(r"\D+((\d+(\.\d+)?)[多余几]?" + CURRENCY_UNITS + r"(\d" + CURRENCY_UNITS + r"?)?)")
632
- matchers = pattern.findall(text)
633
- if matchers:
634
- # print('money')
635
- for matcher in matchers:
636
- text = text.replace(matcher[0], Money(money=matcher[0]).money2chntext(), 1)
637
-
638
- # Normalize landline/mobile phone numbers
639
- # Mobile numbers
640
- # http://www.jihaoba.com/news/show/13680
641
- # China Mobile: 139, 138, 137, 136, 135, 134, 159, 158, 157, 150, 151, 152, 188, 187, 182, 183, 184, 178, 198
642
- # China Unicom: 130, 131, 132, 156, 155, 186, 185, 176
643
- # China Telecom: 133, 153, 189, 180, 181, 177
644
- pattern = re.compile(r"\D((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})\D")
645
- matchers = pattern.findall(text)
646
- if matchers:
647
- # print('telephone')
648
- for matcher in matchers:
649
- text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(), 1)
650
- # Landlines
651
- pattern = re.compile(r"\D((0(10|2[1-3]|[3-9]\d{2})-?)?[1-9]\d{6,7})\D")
652
- matchers = pattern.findall(text)
653
- if matchers:
654
- # print('fixed telephone')
655
- for matcher in matchers:
656
- text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(fixed=True), 1)
657
-
658
- # Normalize fractions
659
- pattern = re.compile(r"(\d+/\d+)")
660
- matchers = pattern.findall(text)
661
- if matchers:
662
- # print('fraction')
663
- for matcher in matchers:
664
- text = text.replace(matcher, Fraction(fraction=matcher).fraction2chntext(), 1)
665
-
666
- # Normalize percentages
667
- text = text.replace('%', '%')
668
- pattern = re.compile(r"(\d+(\.\d+)?%)")
669
- matchers = pattern.findall(text)
670
- if matchers:
671
- # print('percentage')
672
- for matcher in matchers:
673
- text = text.replace(matcher[0], Percentage(percentage=matcher[0]).percentage2chntext(), 1)
674
-
675
- # Normalize plain numbers followed by quantifiers
676
- pattern = re.compile(r"(\d+(\.\d+)?)[多余几]?" + COM_QUANTIFIERS)
677
- matchers = pattern.findall(text)
678
- if matchers:
679
- # print('cardinal+quantifier')
680
- for matcher in matchers:
681
- text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
682
-
683
- # Normalize digit sequences (IDs and codes)
684
- pattern = re.compile(r"(\d{4,32})")
685
- matchers = pattern.findall(text)
686
- if matchers:
687
- # print('digit')
688
- for matcher in matchers:
689
- text = text.replace(matcher, Digit(digit=matcher).digit2chntext(), 1)
690
-
691
- # Normalize remaining plain numbers
692
- pattern = re.compile(r"(\d+(\.\d+)?)")
693
- matchers = pattern.findall(text)
694
- if matchers:
695
- # print('cardinal')
696
- for matcher in matchers:
697
- text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
698
-
699
- self.norm_text = text
700
- self._particular()
701
-
702
- text = self.norm_text.lstrip('^').rstrip('$')
703
- if remove_punc:
704
- # Punctuations removal
705
- old_chars = CHINESE_PUNC_LIST + string.punctuation # includes all CN and EN punctuations
706
- new_chars = ' ' * len(old_chars)
707
- del_chars = ''
708
- text = text.translate(str.maketrans(old_chars, new_chars, del_chars))
709
- return text
710
-
711
-
712
- def nsw_test_case(raw_text):
713
- print('I:' + raw_text)
714
- print('O:' + NSWNormalizer(raw_text).normalize())
715
- print('')
716
-
717
-
718
- def nsw_test():
719
- nsw_test_case('固话:0595-23865596或23880880。')
721
- nsw_test_case('手机:+86 19859213959或15659451527。')
722
- nsw_test_case('分数:32477/76391。')
723
- nsw_test_case('百分数:80.03%。')
724
- nsw_test_case('编号:31520181154418。')
725
- nsw_test_case('纯数:2983.07克或12345.60米。')
726
- nsw_test_case('日期:1999年2月20日或09年3月15号。')
727
- nsw_test_case('金钱:12块5,34.5元,20.1万')
728
- nsw_test_case('特殊:O2O或B2C。')
729
- nsw_test_case('3456万吨')
730
- nsw_test_case('2938个')
731
- nsw_test_case('938')
732
- nsw_test_case('今天吃了115个小笼包231个馒头')
733
- nsw_test_case('有62%的概率')
734
-
735
-
736
- if __name__ == '__main__':
737
- # nsw_test()
738
-
739
- p = argparse.ArgumentParser()
740
- p.add_argument('ifile', help='input filename, assume utf-8 encoding')
741
- p.add_argument('ofile', help='output filename')
742
- p.add_argument('--to_upper', action='store_true', help='convert to upper case')
743
- p.add_argument('--to_lower', action='store_true', help='convert to lower case')
744
- p.add_argument('--has_key', action='store_true', help="input text has Kaldi's key as first field.")
745
- p.add_argument('--log_interval', type=int, default=10000, help='log interval in number of processed lines')
746
- args = p.parse_args()
747
-
748
- ifile = codecs.open(args.ifile, 'r', 'utf8')
749
- ofile = codecs.open(args.ofile, 'w+', 'utf8')
750
-
751
- n = 0
752
- for l in ifile:
753
- key = ''
754
- text = ''
755
- if args.has_key:
756
- cols = l.split(maxsplit=1)
757
- key = cols[0]
758
- if len(cols) == 2:
759
- text = cols[1]
760
- else:
761
- text = ''
762
- else:
763
- text = l
764
-
765
- # cases
766
- if args.to_upper and args.to_lower:
767
- sys.stderr.write('text norm: to_upper OR to_lower?')
768
- exit(1)
769
- if args.to_upper:
770
- text = text.upper()
771
- if args.to_lower:
772
- text = text.lower()
773
-
774
- # NSW(Non-Standard-Word) normalization
775
- text = NSWNormalizer(text).normalize()
776
-
777
- #
778
- if args.has_key:
779
- ofile.write(key + '\t' + text)
780
- else:
781
- ofile.write(text)
782
-
783
- n += 1
784
- if n % args.log_interval == 0:
785
- sys.stderr.write("text norm: {} lines done.\n".format(n))
786
-
787
- sys.stderr.write("text norm: {} lines done in total.\n".format(n))
788
-
789
- ifile.close()
790
- ofile.close()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/models/diffusion/dpm_solver/dpm_solver.py DELETED
@@ -1,1154 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
- import math
4
- from tqdm import tqdm
5
-
6
-
7
- class NoiseScheduleVP:
8
- def __init__(
9
- self,
10
- schedule='discrete',
11
- betas=None,
12
- alphas_cumprod=None,
13
- continuous_beta_0=0.1,
14
- continuous_beta_1=20.,
15
- ):
16
- """Create a wrapper class for the forward SDE (VP type).
17
- ***
18
- Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
19
- We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.
20
- ***
21
- The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
22
- We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
23
- Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
24
- log_alpha_t = self.marginal_log_mean_coeff(t)
25
- sigma_t = self.marginal_std(t)
26
- lambda_t = self.marginal_lambda(t)
27
- Moreover, as lambda(t) is an invertible function, we also support its inverse function:
28
- t = self.inverse_lambda(lambda_t)
29
- ===============================================================
30
- We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
31
- 1. For discrete-time DPMs:
32
- For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
33
- t_i = (i + 1) / N
34
- e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
35
- We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
36
- Args:
37
- betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
38
- alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
39
- Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
40
- **Important**: Please pay special attention for the args for `alphas_cumprod`:
41
- The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
42
- q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
43
- Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
44
- alpha_{t_n} = \sqrt{\hat{alpha_n}},
45
- and
46
- log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
47
- 2. For continuous-time DPMs:
48
- We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
49
- schedule are the default settings in DDPM and improved-DDPM:
50
- Args:
51
- beta_min: A `float` number. The smallest beta for the linear schedule.
52
- beta_max: A `float` number. The largest beta for the linear schedule.
53
- cosine_s: A `float` number. The hyperparameter in the cosine schedule.
54
- cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
55
- T: A `float` number. The ending time of the forward process.
56
- ===============================================================
57
- Args:
58
- schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
59
- 'linear' or 'cosine' for continuous-time DPMs.
60
- Returns:
61
- A wrapper object of the forward SDE (VP type).
62
-
63
- ===============================================================
64
- Example:
65
- # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
66
- >>> ns = NoiseScheduleVP('discrete', betas=betas)
67
- # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
68
- >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
69
- # For continuous-time DPMs (VPSDE), linear schedule:
70
- >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
71
- """
72
-
73
- if schedule not in ['discrete', 'linear', 'cosine']:
74
- raise ValueError(
75
- "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
76
- schedule))
77
-
78
- self.schedule = schedule
79
- if schedule == 'discrete':
80
- if betas is not None:
81
- log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
82
- else:
83
- assert alphas_cumprod is not None
84
- log_alphas = 0.5 * torch.log(alphas_cumprod)
85
- self.total_N = len(log_alphas)
86
- self.T = 1.
87
- self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
88
- self.log_alpha_array = log_alphas.reshape((1, -1,))
89
- else:
90
- self.total_N = 1000
91
- self.beta_0 = continuous_beta_0
92
- self.beta_1 = continuous_beta_1
93
- self.cosine_s = 0.008
94
- self.cosine_beta_max = 999.
95
- self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
96
- 1. + self.cosine_s) / math.pi - self.cosine_s
97
- self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
98
- self.schedule = schedule
99
- if schedule == 'cosine':
100
- # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
101
- # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
102
- self.T = 0.9946
103
- else:
104
- self.T = 1.
105
-
106
- def marginal_log_mean_coeff(self, t):
107
- """
108
- Compute log(alpha_t) of a given continuous-time label t in [0, T].
109
- """
110
- if self.schedule == 'discrete':
111
- return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
112
- self.log_alpha_array.to(t.device)).reshape((-1))
113
- elif self.schedule == 'linear':
114
- return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
115
- elif self.schedule == 'cosine':
116
- log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
117
- log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
118
- return log_alpha_t
119
-
120
- def marginal_alpha(self, t):
121
- """
122
- Compute alpha_t of a given continuous-time label t in [0, T].
123
- """
124
- return torch.exp(self.marginal_log_mean_coeff(t))
125
-
126
- def marginal_std(self, t):
127
- """
128
- Compute sigma_t of a given continuous-time label t in [0, T].
129
- """
130
- return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
131
-
132
- def marginal_lambda(self, t):
133
- """
134
- Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
135
- """
136
- log_mean_coeff = self.marginal_log_mean_coeff(t)
137
- log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
138
- return log_mean_coeff - log_std
139
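Since sigma_t^2 = 1 - alpha_t^2 for VP-type SDEs, the quantity computed above is exactly half of the log signal-to-noise ratio:

```
lambda_t = log(alpha_t / sigma_t) = 0.5 * log(alpha_t^2 / sigma_t^2) = 0.5 * log SNR(t)
```

which is why the docstring of `inverse_lambda` below refers to lambda_t as the "half-logSNR".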
-
140
- def inverse_lambda(self, lamb):
141
- """
142
- Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
143
- """
144
- if self.schedule == 'linear':
145
- tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
146
- Delta = self.beta_0 ** 2 + tmp
147
- return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
148
- elif self.schedule == 'discrete':
149
- log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
150
- t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
151
- torch.flip(self.t_array.to(lamb.device), [1]))
152
- return t.reshape((-1,))
153
- else:
154
- log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
155
- t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
156
- 1. + self.cosine_s) / math.pi - self.cosine_s
157
- t = t_fn(log_alpha)
158
- return t
159
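A minimal round-trip sketch (not part of the diffed file; `betas` here is an assumed DDPM-style linear schedule) showing that `marginal_lambda` and `inverse_lambda` are near-inverses for the 'discrete' schedule:

```
import torch

betas = torch.linspace(1e-4, 2e-2, 1000)       # assumed DDPM-style linear betas
ns = NoiseScheduleVP('discrete', betas=betas)

t = torch.tensor([0.1, 0.5, 1.0])              # keypoints of the discrete grid
lam = ns.marginal_lambda(t)
t_rec = ns.inverse_lambda(lam)
print(torch.allclose(t, t_rec, atol=1e-4))     # True, up to interpolation error
```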
-
160
-
161
- def model_wrapper(
162
- model,
163
- noise_schedule,
164
- model_type="noise",
165
- model_kwargs={},
166
- guidance_type="uncond",
167
- condition=None,
168
- unconditional_condition=None,
169
- guidance_scale=1.,
170
- classifier_fn=None,
171
- classifier_kwargs={},
172
- ):
173
- """Create a wrapper function for the noise prediction model.
174
- DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
175
- first wrap the model function into a noise prediction model that accepts the continuous time as input.
176
- We support four types of the diffusion model by setting `model_type`:
177
- 1. "noise": noise prediction model. (Trained by predicting noise).
178
- 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
179
- 3. "v": velocity prediction model. (Trained by predicting the velocity).
180
- The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
181
- [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
182
- arXiv preprint arXiv:2202.00512 (2022).
183
- [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
184
- arXiv preprint arXiv:2210.02303 (2022).
185
-
186
- 4. "score": marginal score function. (Trained by denoising score matching).
187
- Note that the score function and the noise prediction model follow a simple relationship:
188
- ```
189
- noise(x_t, t) = -sigma_t * score(x_t, t)
190
- ```
191
- We support three types of guided sampling by DPMs by setting `guidance_type`:
192
- 1. "uncond": unconditional sampling by DPMs.
193
- The input `model` has the following format:
194
- ``
195
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
196
- ``
197
- 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
198
- The input `model` has the following format:
199
- ``
200
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
201
- ``
202
- The input `classifier_fn` has the following format:
203
- ``
204
- classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
205
- ``
206
- [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
207
- in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
208
- 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
209
- The input `model` has the following format:
210
- ``
211
- model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
212
- ``
213
- And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
214
- [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
215
- arXiv preprint arXiv:2207.12598 (2022).
216
-
217
- The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
218
- or continuous-time labels (i.e. epsilon to T).
219
- We wrap the model function to accept only `x` and `t_continuous` as inputs, and output the predicted noise:
220
- ``
221
- def model_fn(x, t_continuous) -> noise:
222
- t_input = get_model_input_time(t_continuous)
223
- return noise_pred(model, x, t_input, **model_kwargs)
224
- ``
225
- where `t_continuous` is the continuous time label (i.e. epsilon to T). We use `model_fn` for DPM-Solver.
226
- ===============================================================
227
- Args:
228
- model: A diffusion model with the corresponding format described above.
229
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
230
- model_type: A `str`. The parameterization type of the diffusion model.
231
- "noise" or "x_start" or "v" or "score".
232
- model_kwargs: A `dict`. A dict for the other inputs of the model function.
233
- guidance_type: A `str`. The type of the guidance for sampling.
234
- "uncond" or "classifier" or "classifier-free".
235
- condition: A pytorch tensor. The condition for the guided sampling.
236
- Only used for "classifier" or "classifier-free" guidance type.
237
- unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
238
- Only used for "classifier-free" guidance type.
239
- guidance_scale: A `float`. The scale for the guided sampling.
240
- classifier_fn: A classifier function. Only used for the classifier guidance.
241
- classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
242
- Returns:
243
- A noise prediction model that accepts the noised data and the continuous time as the inputs.
244
- """
245
-
246
- def get_model_input_time(t_continuous):
247
- """
248
- Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
249
- For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
250
- For continuous-time DPMs, we just use `t_continuous`.
251
- """
252
- if noise_schedule.schedule == 'discrete':
253
- return (t_continuous - 1. / noise_schedule.total_N) * 1000.
254
- else:
255
- return t_continuous
256
-
257
- def noise_pred_fn(x, t_continuous, cond=None):
258
- if t_continuous.reshape((-1,)).shape[0] == 1:
259
- t_continuous = t_continuous.expand((x.shape[0]))
260
- t_input = get_model_input_time(t_continuous)
261
- if cond is None:
262
- output = model(x, t_input, **model_kwargs)
263
- else:
264
- output = model(x, t_input, cond, **model_kwargs)
265
- if model_type == "noise":
266
- return output
267
- elif model_type == "x_start":
268
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
269
- dims = x.dim()
270
- return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
271
- elif model_type == "v":
272
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
273
- dims = x.dim()
274
- return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
275
- elif model_type == "score":
276
- sigma_t = noise_schedule.marginal_std(t_continuous)
277
- dims = x.dim()
278
- return -expand_dims(sigma_t, dims) * output
279
-
280
- def cond_grad_fn(x, t_input):
281
- """
282
- Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
283
- """
284
- with torch.enable_grad():
285
- x_in = x.detach().requires_grad_(True)
286
- log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
287
- return torch.autograd.grad(log_prob.sum(), x_in)[0]
288
-
289
- def model_fn(x, t_continuous):
290
- """
291
- The noise prediction model function that is used for DPM-Solver.
292
- """
293
- if t_continuous.reshape((-1,)).shape[0] == 1:
294
- t_continuous = t_continuous.expand((x.shape[0]))
295
- if guidance_type == "uncond":
296
- return noise_pred_fn(x, t_continuous)
297
- elif guidance_type == "classifier":
298
- assert classifier_fn is not None
299
- t_input = get_model_input_time(t_continuous)
300
- cond_grad = cond_grad_fn(x, t_input)
301
- sigma_t = noise_schedule.marginal_std(t_continuous)
302
- noise = noise_pred_fn(x, t_continuous)
303
- return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
304
- elif guidance_type == "classifier-free":
305
- if guidance_scale == 1. or unconditional_condition is None:
306
- return noise_pred_fn(x, t_continuous, cond=condition)
307
- else:
308
- x_in = torch.cat([x] * 2)
309
- t_in = torch.cat([t_continuous] * 2)
310
- c_in = torch.cat([unconditional_condition, condition])
311
- noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
312
- return noise_uncond + guidance_scale * (noise - noise_uncond)
313
-
314
- assert model_type in ["noise", "x_start", "v", "score"]  # "score" is documented above and handled by noise_pred_fn
315
- assert guidance_type in ["uncond", "classifier", "classifier-free"]
316
- return model_fn
317
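A hypothetical usage sketch (the names `unet`, `cond_emb`, and `uncond_emb` are placeholders, not from the file): wrapping a conditional epsilon-prediction model for classifier-free guidance:

```
model_fn = model_wrapper(
    unet,                               # your conditional noise-prediction model
    ns,                                 # a NoiseScheduleVP instance
    model_type="noise",
    guidance_type="classifier-free",
    condition=cond_emb,
    unconditional_condition=uncond_emb,
    guidance_scale=7.5,
)
# model_fn(x, t_continuous) now returns the guided noise prediction.
```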
-
318
-
319
- class DPM_Solver:
320
- def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
321
- """Construct a DPM-Solver.
322
- We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
323
- If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
324
- If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
325
- In that case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
326
- The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
327
- Args:
328
- model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
329
- ``
330
- def model_fn(x, t_continuous):
331
- return noise
332
- ``
333
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
334
- predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
335
- thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
336
- max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
337
-
338
- [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
339
- """
340
- self.model = model_fn
341
- self.noise_schedule = noise_schedule
342
- self.predict_x0 = predict_x0
343
- self.thresholding = thresholding
344
- self.max_val = max_val
345
-
346
- def noise_prediction_fn(self, x, t):
347
- """
348
- Return the noise prediction of the model at (x, t).
349
- """
350
- return self.model(x, t)
351
-
352
- def data_prediction_fn(self, x, t):
353
- """
354
- Return the data prediction (predicted x0) at (x, t), with optional dynamic thresholding.
355
- """
356
- noise = self.noise_prediction_fn(x, t)
357
- dims = x.dim()
358
- alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
359
- x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
360
- if self.thresholding:
361
- p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
362
- s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
363
- s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
364
- x0 = torch.clamp(x0, -s, s) / s
365
- return x0
366
-
367
- def model_fn(self, x, t):
368
- """
369
- Convert the model to the noise prediction model or the data prediction model.
370
- """
371
- if self.predict_x0:
372
- return self.data_prediction_fn(x, t)
373
- else:
374
- return self.noise_prediction_fn(x, t)
375
-
376
- def get_time_steps(self, skip_type, t_T, t_0, N, device):
377
- """Compute the intermediate time steps for sampling.
378
- Args:
379
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
380
- - 'logSNR': uniform logSNR for the time steps.
381
- 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
382
- 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
383
- t_T: A `float`. The starting time of the sampling (default is T).
384
- t_0: A `float`. The ending time of the sampling (default is epsilon).
385
- N: An `int`. The total number of time step intervals (the function returns N + 1 time points).
386
- device: A torch device.
387
- Returns:
388
- A pytorch tensor of the time steps, with the shape (N + 1,).
389
- """
390
- if skip_type == 'logSNR':
391
- lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
392
- lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
393
- logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
394
- return self.noise_schedule.inverse_lambda(logSNR_steps)
395
- elif skip_type == 'time_uniform':
396
- return torch.linspace(t_T, t_0, N + 1).to(device)
397
- elif skip_type == 'time_quadratic':
398
- t_order = 2
399
- t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
400
- return t
401
- else:
402
- raise ValueError(
403
- "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
404
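A small sketch (assuming a constructed `dpm_solver` instance) comparing the three spacings for N = 5 intervals from t_T = 1.0 down to t_0 = 1e-3; each call returns a tensor of 6 decreasing times, with 'logSNR' clustering steps where the log-SNR changes fastest:

```
ts_uniform = dpm_solver.get_time_steps('time_uniform',   1.0, 1e-3, 5, 'cpu')
ts_logsnr  = dpm_solver.get_time_steps('logSNR',         1.0, 1e-3, 5, 'cpu')
ts_quad    = dpm_solver.get_time_steps('time_quadratic', 1.0, 1e-3, 5, 'cpu')
```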
-
405
- def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
406
- """
407
- Get the order of each step for sampling by the singlestep DPM-Solver.
408
- We combine DPM-Solver-1, 2, and 3 to use up all the function evaluations; the combination is named "DPM-Solver-fast".
409
- Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
410
- - If order == 1:
411
- We take `steps` of DPM-Solver-1 (i.e. DDIM).
412
- - If order == 2:
413
- - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
414
- - If steps % 2 == 0, we use K steps of DPM-Solver-2.
415
- - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
416
- - If order == 3:
417
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
418
- - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
419
- - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
420
- - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
421
- ============================================
422
- Args:
423
- order: An `int`. The max order for the solver (2 or 3).
424
- steps: An `int`. The total number of function evaluations (NFE).
425
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
426
- 'logSNR': uniform logSNR for the time steps.
427
- 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
428
- 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
429
- t_T: A `float`. The starting time of the sampling (default is T).
430
- t_0: A `float`. The ending time of the sampling (default is epsilon).
431
- device: A torch device.
432
- Returns:
433
- timesteps_outer: A pytorch tensor of the outer time steps; orders: A list of the solver order of each step.
434
- """
435
- if order == 3:
436
- K = steps // 3 + 1
437
- if steps % 3 == 0:
438
- orders = [3, ] * (K - 2) + [2, 1]
439
- elif steps % 3 == 1:
440
- orders = [3, ] * (K - 1) + [1]
441
- else:
442
- orders = [3, ] * (K - 1) + [2]
443
- elif order == 2:
444
- if steps % 2 == 0:
445
- K = steps // 2
446
- orders = [2, ] * K
447
- else:
448
- K = steps // 2 + 1
449
- orders = [2, ] * (K - 1) + [1]
450
- elif order == 1:
451
- K = steps  # one first-order interval per function evaluation; only used by the 'logSNR' branch below
452
- orders = [1, ] * steps
453
- else:
454
- raise ValueError("'order' must be '1' or '2' or '3'.")
455
- if skip_type == 'logSNR':
456
- # To reproduce the results in DPM-Solver paper
457
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
458
- else:
459
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
460
- torch.cumsum(torch.tensor([0, ] + orders), dim=0).to(device)]
461
- return timesteps_outer, orders
462
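A worked example of the bookkeeping above (assuming a constructed `dpm_solver`): with steps = 9 and order = 3, K = 9 // 3 + 1 = 4 and steps % 3 == 0, so the per-step orders are [3, 3, 2, 1], which use up exactly the 9 allowed function evaluations:

```
ts, orders = dpm_solver.get_orders_and_timesteps_for_singlestep_solver(
    steps=9, order=3, skip_type='time_uniform', t_T=1.0, t_0=1e-3, device='cpu')
assert orders == [3, 3, 2, 1] and sum(orders) == 9
```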
-
463
- def denoise_to_zero_fn(self, x, s):
464
- """
465
- Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
466
- """
467
- return self.data_prediction_fn(x, s)
468
-
469
- def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
470
- """
471
- DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
472
- Args:
473
- x: A pytorch tensor. The initial value at time `s`.
474
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
475
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
476
- model_s: A pytorch tensor. The model function evaluated at time `s`.
477
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
478
- return_intermediate: A `bool`. If true, also return the model value at time `s`.
479
- Returns:
480
- x_t: A pytorch tensor. The approximated solution at time `t`.
481
- """
482
- ns = self.noise_schedule
483
- dims = x.dim()
484
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
485
- h = lambda_t - lambda_s
486
- log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
487
- sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
488
- alpha_t = torch.exp(log_alpha_t)
489
-
490
- if self.predict_x0:
491
- phi_1 = torch.expm1(-h)
492
- if model_s is None:
493
- model_s = self.model_fn(x, s)
494
- x_t = (
495
- expand_dims(sigma_t / sigma_s, dims) * x
496
- - expand_dims(alpha_t * phi_1, dims) * model_s
497
- )
498
- if return_intermediate:
499
- return x_t, {'model_s': model_s}
500
- else:
501
- return x_t
502
- else:
503
- phi_1 = torch.expm1(h)
504
- if model_s is None:
505
- model_s = self.model_fn(x, s)
506
- x_t = (
507
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
508
- - expand_dims(sigma_t * phi_1, dims) * model_s
509
- )
510
- if return_intermediate:
511
- return x_t, {'model_s': model_s}
512
- else:
513
- return x_t
514
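For reference, the two branches above implement the exact first-order (DDIM-equivalent) update; in the notation of the surrounding code, with h = lambda_t - lambda_s:

```
data prediction  (predict_x0=True):   x_t = (sigma_t / sigma_s) * x_s - alpha_t * (exp(-h) - 1) * model_s
noise prediction (predict_x0=False):  x_t = (alpha_t / alpha_s) * x_s - sigma_t * (exp(h) - 1) * model_s
```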
-
515
- def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
516
- solver_type='dpm_solver'):
517
- """
518
- Singlestep solver DPM-Solver-2 from time `s` to time `t`.
519
- Args:
520
- x: A pytorch tensor. The initial value at time `s`.
521
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
522
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
523
- r1: A `float`. The hyperparameter of the second-order solver.
524
- model_s: A pytorch tensor. The model function evaluated at time `s`.
525
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
526
- return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
527
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
528
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
529
- Returns:
530
- x_t: A pytorch tensor. The approximated solution at time `t`.
531
- """
532
- if solver_type not in ['dpm_solver', 'taylor']:
533
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
534
- if r1 is None:
535
- r1 = 0.5
536
- ns = self.noise_schedule
537
- dims = x.dim()
538
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
539
- h = lambda_t - lambda_s
540
- lambda_s1 = lambda_s + r1 * h
541
- s1 = ns.inverse_lambda(lambda_s1)
542
- log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
543
- s1), ns.marginal_log_mean_coeff(t)
544
- sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
545
- alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
546
-
547
- if self.predict_x0:
548
- phi_11 = torch.expm1(-r1 * h)
549
- phi_1 = torch.expm1(-h)
550
-
551
- if model_s is None:
552
- model_s = self.model_fn(x, s)
553
- x_s1 = (
554
- expand_dims(sigma_s1 / sigma_s, dims) * x
555
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
556
- )
557
- model_s1 = self.model_fn(x_s1, s1)
558
- if solver_type == 'dpm_solver':
559
- x_t = (
560
- expand_dims(sigma_t / sigma_s, dims) * x
561
- - expand_dims(alpha_t * phi_1, dims) * model_s
562
- - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
563
- )
564
- elif solver_type == 'taylor':
565
- x_t = (
566
- expand_dims(sigma_t / sigma_s, dims) * x
567
- - expand_dims(alpha_t * phi_1, dims) * model_s
568
- + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
569
- model_s1 - model_s)
570
- )
571
- else:
572
- phi_11 = torch.expm1(r1 * h)
573
- phi_1 = torch.expm1(h)
574
-
575
- if model_s is None:
576
- model_s = self.model_fn(x, s)
577
- x_s1 = (
578
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
579
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
580
- )
581
- model_s1 = self.model_fn(x_s1, s1)
582
- if solver_type == 'dpm_solver':
583
- x_t = (
584
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
585
- - expand_dims(sigma_t * phi_1, dims) * model_s
586
- - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
587
- )
588
- elif solver_type == 'taylor':
589
- x_t = (
590
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
591
- - expand_dims(sigma_t * phi_1, dims) * model_s
592
- - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
593
- )
594
- if return_intermediate:
595
- return x_t, {'model_s': model_s, 'model_s1': model_s1}
596
- else:
597
- return x_t
598
-
599
- def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
600
- return_intermediate=False, solver_type='dpm_solver'):
601
- """
602
- Singlestep solver DPM-Solver-3 from time `s` to time `t`.
603
- Args:
604
- x: A pytorch tensor. The initial value at time `s`.
605
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
606
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
607
- r1: A `float`. The hyperparameter of the third-order solver.
608
- r2: A `float`. The hyperparameter of the third-order solver.
609
- model_s: A pytorch tensor. The model function evaluated at time `s`.
610
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
611
- model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
612
- If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
613
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
614
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
615
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
616
- Returns:
617
- x_t: A pytorch tensor. The approximated solution at time `t`.
618
- """
619
- if solver_type not in ['dpm_solver', 'taylor']:
620
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
621
- if r1 is None:
622
- r1 = 1. / 3.
623
- if r2 is None:
624
- r2 = 2. / 3.
625
- ns = self.noise_schedule
626
- dims = x.dim()
627
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
628
- h = lambda_t - lambda_s
629
- lambda_s1 = lambda_s + r1 * h
630
- lambda_s2 = lambda_s + r2 * h
631
- s1 = ns.inverse_lambda(lambda_s1)
632
- s2 = ns.inverse_lambda(lambda_s2)
633
- log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
634
- s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
635
- sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
636
- s2), ns.marginal_std(t)
637
- alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
638
-
639
- if self.predict_x0:
640
- phi_11 = torch.expm1(-r1 * h)
641
- phi_12 = torch.expm1(-r2 * h)
642
- phi_1 = torch.expm1(-h)
643
- phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
644
- phi_2 = phi_1 / h + 1.
645
- phi_3 = phi_2 / h - 0.5
646
-
647
- if model_s is None:
648
- model_s = self.model_fn(x, s)
649
- if model_s1 is None:
650
- x_s1 = (
651
- expand_dims(sigma_s1 / sigma_s, dims) * x
652
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
653
- )
654
- model_s1 = self.model_fn(x_s1, s1)
655
- x_s2 = (
656
- expand_dims(sigma_s2 / sigma_s, dims) * x
657
- - expand_dims(alpha_s2 * phi_12, dims) * model_s
658
- + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
659
- )
660
- model_s2 = self.model_fn(x_s2, s2)
661
- if solver_type == 'dpm_solver':
662
- x_t = (
663
- expand_dims(sigma_t / sigma_s, dims) * x
664
- - expand_dims(alpha_t * phi_1, dims) * model_s
665
- + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
666
- )
667
- elif solver_type == 'taylor':
668
- D1_0 = (1. / r1) * (model_s1 - model_s)
669
- D1_1 = (1. / r2) * (model_s2 - model_s)
670
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
671
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
672
- x_t = (
673
- expand_dims(sigma_t / sigma_s, dims) * x
674
- - expand_dims(alpha_t * phi_1, dims) * model_s
675
- + expand_dims(alpha_t * phi_2, dims) * D1
676
- - expand_dims(alpha_t * phi_3, dims) * D2
677
- )
678
- else:
679
- phi_11 = torch.expm1(r1 * h)
680
- phi_12 = torch.expm1(r2 * h)
681
- phi_1 = torch.expm1(h)
682
- phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
683
- phi_2 = phi_1 / h - 1.
684
- phi_3 = phi_2 / h - 0.5
685
-
686
- if model_s is None:
687
- model_s = self.model_fn(x, s)
688
- if model_s1 is None:
689
- x_s1 = (
690
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
691
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
692
- )
693
- model_s1 = self.model_fn(x_s1, s1)
694
- x_s2 = (
695
- expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
696
- - expand_dims(sigma_s2 * phi_12, dims) * model_s
697
- - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
698
- )
699
- model_s2 = self.model_fn(x_s2, s2)
700
- if solver_type == 'dpm_solver':
701
- x_t = (
702
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
703
- - expand_dims(sigma_t * phi_1, dims) * model_s
704
- - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
705
- )
706
- elif solver_type == 'taylor':
707
- D1_0 = (1. / r1) * (model_s1 - model_s)
708
- D1_1 = (1. / r2) * (model_s2 - model_s)
709
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
710
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
711
- x_t = (
712
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
713
- - expand_dims(sigma_t * phi_1, dims) * model_s
714
- - expand_dims(sigma_t * phi_2, dims) * D1
715
- - expand_dims(sigma_t * phi_3, dims) * D2
716
- )
717
-
718
- if return_intermediate:
719
- return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
720
- else:
721
- return x_t
722
-
723
- def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
724
- """
725
- Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
726
- Args:
727
- x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
728
- model_prev_list: A list of pytorch tensors. The previously computed model values.
729
- t_prev_list: A list of pytorch tensors. The previous times; each has the shape (x.shape[0],).
730
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
731
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
732
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
733
- Returns:
734
- x_t: A pytorch tensor. The approximated solution at time `t`.
735
- """
736
- if solver_type not in ['dpm_solver', 'taylor']:
737
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
738
- ns = self.noise_schedule
739
- dims = x.dim()
740
- model_prev_1, model_prev_0 = model_prev_list
741
- t_prev_1, t_prev_0 = t_prev_list
742
- lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
743
- t_prev_0), ns.marginal_lambda(t)
744
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
745
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
746
- alpha_t = torch.exp(log_alpha_t)
747
-
748
- h_0 = lambda_prev_0 - lambda_prev_1
749
- h = lambda_t - lambda_prev_0
750
- r0 = h_0 / h
751
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
752
- if self.predict_x0:
753
- if solver_type == 'dpm_solver':
754
- x_t = (
755
- expand_dims(sigma_t / sigma_prev_0, dims) * x
756
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
757
- - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
758
- )
759
- elif solver_type == 'taylor':
760
- x_t = (
761
- expand_dims(sigma_t / sigma_prev_0, dims) * x
762
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
763
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
764
- )
765
- else:
766
- if solver_type == 'dpm_solver':
767
- x_t = (
768
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
769
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
770
- - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
771
- )
772
- elif solver_type == 'taylor':
773
- x_t = (
774
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
775
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
776
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
777
- )
778
- return x_t
779
-
780
- def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
781
- """
782
- Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
783
- Args:
784
- x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
785
- model_prev_list: A list of pytorch tensors. The previously computed model values.
786
- t_prev_list: A list of pytorch tensors. The previous times; each has the shape (x.shape[0],).
787
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
788
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
789
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
790
- Returns:
791
- x_t: A pytorch tensor. The approximated solution at time `t`.
792
- """
793
- ns = self.noise_schedule
794
- dims = x.dim()
795
- model_prev_2, model_prev_1, model_prev_0 = model_prev_list
796
- t_prev_2, t_prev_1, t_prev_0 = t_prev_list
797
- lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
798
- t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
799
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
800
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
801
- alpha_t = torch.exp(log_alpha_t)
802
-
803
- h_1 = lambda_prev_1 - lambda_prev_2
804
- h_0 = lambda_prev_0 - lambda_prev_1
805
- h = lambda_t - lambda_prev_0
806
- r0, r1 = h_0 / h, h_1 / h
807
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
808
- D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
809
- D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
810
- D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
811
- if self.predict_x0:
812
- x_t = (
813
- expand_dims(sigma_t / sigma_prev_0, dims) * x
814
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
815
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
816
- - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
817
- )
818
- else:
819
- x_t = (
820
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
821
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
822
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
823
- - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
824
- )
825
- return x_t
826
-
827
- def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
828
- r2=None):
829
- """
830
- Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
831
- Args:
832
- x: A pytorch tensor. The initial value at time `s`.
833
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
834
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
835
- order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
836
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
837
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
838
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
839
- r1: A `float`. The hyperparameter of the second-order or third-order solver.
840
- r2: A `float`. The hyperparameter of the third-order solver.
841
- Returns:
842
- x_t: A pytorch tensor. The approximated solution at time `t`.
843
- """
844
- if order == 1:
845
- return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
846
- elif order == 2:
847
- return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
848
- solver_type=solver_type, r1=r1)
849
- elif order == 3:
850
- return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
851
- solver_type=solver_type, r1=r1, r2=r2)
852
- else:
853
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
854
-
855
- def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
856
- """
857
- Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
858
- Args:
859
- x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
860
- model_prev_list: A list of pytorch tensors. The previously computed model values.
861
- t_prev_list: A list of pytorch tensors. The previous times; each has the shape (x.shape[0],).
862
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
863
- order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
864
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
865
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
866
- Returns:
867
- x_t: A pytorch tensor. The approximated solution at time `t`.
868
- """
869
- if order == 1:
870
- return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
871
- elif order == 2:
872
- return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
873
- elif order == 3:
874
- return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
875
- else:
876
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
877
-
878
- def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
879
- solver_type='dpm_solver'):
880
- """
881
- The adaptive step size solver based on singlestep DPM-Solver.
882
- Args:
883
- x: A pytorch tensor. The initial value at time `t_T`.
884
- order: An `int`. The (higher) order of the solver. We only support order == 2 or 3.
885
- t_T: A `float`. The starting time of the sampling (default is T).
886
- t_0: A `float`. The ending time of the sampling (default is epsilon).
887
- h_init: A `float`. The initial step size (for logSNR).
888
- atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, following [1].
890
- rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
891
- theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, following [1].
891
- t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
892
- current time and `t_0` is less than `t_err`. The default setting is 1e-5.
893
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
894
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
895
- Returns:
896
- x_0: A pytorch tensor. The approximated solution at time `t_0`.
897
- [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
898
- """
899
- ns = self.noise_schedule
900
- s = t_T * torch.ones((x.shape[0],)).to(x)
901
- lambda_s = ns.marginal_lambda(s)
902
- lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
903
- h = h_init * torch.ones_like(s).to(x)
904
- x_prev = x
905
- nfe = 0
906
- if order == 2:
907
- r1 = 0.5
908
- lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
909
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
910
- solver_type=solver_type,
911
- **kwargs)
912
- elif order == 3:
913
- r1, r2 = 1. / 3., 2. / 3.
914
- lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
915
- return_intermediate=True,
916
- solver_type=solver_type)
917
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
918
- solver_type=solver_type,
919
- **kwargs)
920
- else:
921
- raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
922
- while torch.abs((s - t_0)).mean() > t_err:
923
- t = ns.inverse_lambda(lambda_s + h)
924
- x_lower, lower_noise_kwargs = lower_update(x, s, t)
925
- x_higher = higher_update(x, s, t, **lower_noise_kwargs)
926
- delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
927
- norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
928
- E = norm_fn((x_higher - x_lower) / delta).max()
929
- if torch.all(E <= 1.):
930
- x = x_higher
931
- s = t
932
- x_prev = x_lower
933
- lambda_s = ns.marginal_lambda(s)
934
- h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
935
- nfe += order
936
- print('adaptive solver nfe', nfe)
937
- return x
938
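In other words, the loop above is a standard embedded-pair error controller: a step is accepted iff E <= 1, where E is the scaled difference between the lower- and higher-order solutions under the tolerance delta = max(atol, rtol * max(|x_lower|, |x_prev|)), and the step size is then adapted as

```
h <- min(theta * h * E^(-1 / order), lambda_0 - lambda_s)
```

so accepted steps grow when the local error is small, and the solver never overshoots lambda_0.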
-
939
- def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
940
- method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
941
- atol=0.0078, rtol=0.05,
942
- ):
943
- """
944
- Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
945
- =====================================================
946
- We support the following algorithms for both noise prediction model and data prediction model:
947
- - 'singlestep':
948
- Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
949
- We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
950
- The total number of function evaluations (NFE) == `steps`.
951
- Given a fixed NFE == `steps`, the sampling procedure is:
952
- - If `order` == 1:
953
- - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
954
- - If `order` == 2:
955
- - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
956
- - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
957
- - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
958
- - If `order` == 3:
959
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
960
- - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
961
- - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
962
- - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
963
- - 'multistep':
964
- Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
965
- We initialize the first `order` values by lower order multistep solvers.
966
- Given a fixed NFE == `steps`, the sampling procedure is:
967
- Denote K = steps.
968
- - If `order` == 1:
969
- - We use K steps of DPM-Solver-1 (i.e. DDIM).
970
- - If `order` == 2:
971
- We first use 1 step of DPM-Solver-1, then (K - 1) steps of multistep DPM-Solver-2.
972
- If `order` == 3:
973
- We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3.
974
- - 'singlestep_fixed':
975
- Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
976
- We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
977
- - 'adaptive':
978
- Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
979
- We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
980
- You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
981
- (NFE) and the sample quality.
982
- - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
983
- - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
984
- =====================================================
985
- Some advice on choosing the algorithm:
986
- - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
987
- Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
988
- e.g.
989
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
990
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
991
- skip_type='time_uniform', method='singlestep')
992
- - For **guided sampling with large guidance scale** by DPMs:
993
- Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
994
- e.g.
995
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
996
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
997
- skip_type='time_uniform', method='multistep')
998
- We support three types of `skip_type`:
999
- 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
1000
- 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
1001
- - 'time_quadratic': quadratic time for the time steps.
1002
- =====================================================
1003
- Args:
1004
- x: A pytorch tensor. The initial value at time `t_start`
1005
- e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
1006
- steps: An `int`. The total number of function evaluations (NFE).
1007
- t_start: A `float`. The starting time of the sampling.
1008
- If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
1009
- t_end: A `float`. The ending time of the sampling.
1010
- If `t_end` is None, we use 1. / self.noise_schedule.total_N.
1011
- e.g. if total_N == 1000, we have `t_end` == 1e-3.
1012
- For discrete-time DPMs:
1013
- - We recommend `t_end` == 1. / self.noise_schedule.total_N.
1014
- For continuous-time DPMs:
1015
- - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
1016
- order: An `int`. The order of DPM-Solver.
1017
- skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
1018
- method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
1019
- denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
1020
- Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
1021
- This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
1022
- score_sde (https://arxiv.org/abs/2011.13456). Such a trick can improve the FID
1023
- for diffusion model sampling by diffusion SDEs on low-resolution images
1024
- (such as CIFAR-10). However, we observed that such a trick does not matter for
1025
- high-resolution images. As it needs an additional NFE, we do not recommend
1026
- it for high-resolution images.
1027
- lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
1028
- Only valid for `method=multistep` and `steps < 15`. We empirically find that
1029
- this trick is key to stabilizing sampling by DPM-Solver with very few steps
1030
- (especially for steps <= 10). So we recommend setting it to `True`.
1031
- solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
1032
- atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1033
- rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1034
- Returns:
1035
- x_end: A pytorch tensor. The approximated solution at time `t_end`.
1036
- """
1037
- t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
1038
- t_T = self.noise_schedule.T if t_start is None else t_start
1039
- device = x.device
1040
- if method == 'adaptive':
1041
- with torch.no_grad():
1042
- x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
1043
- solver_type=solver_type)
1044
- elif method == 'multistep':
1045
- assert steps >= order
1046
- timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
1047
- assert timesteps.shape[0] - 1 == steps
1048
- with torch.no_grad():
1049
- vec_t = timesteps[0].expand((x.shape[0]))
1050
- model_prev_list = [self.model_fn(x, vec_t)]
1051
- t_prev_list = [vec_t]
1052
- # Init the first `order` values by lower order multistep DPM-Solver.
1053
- for init_order in tqdm(range(1, order), desc="DPM init order"):
1054
- vec_t = timesteps[init_order].expand(x.shape[0])
1055
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
1056
- solver_type=solver_type)
1057
- model_prev_list.append(self.model_fn(x, vec_t))
1058
- t_prev_list.append(vec_t)
1059
- # Compute the remaining values by `order`-th order multistep DPM-Solver.
1060
- for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
1061
- vec_t = timesteps[step].expand(x.shape[0])
1062
- if lower_order_final and steps < 15:
1063
- step_order = min(order, steps + 1 - step)
1064
- else:
1065
- step_order = order
1066
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
1067
- solver_type=solver_type)
1068
- for i in range(order - 1):
1069
- t_prev_list[i] = t_prev_list[i + 1]
1070
- model_prev_list[i] = model_prev_list[i + 1]
1071
- t_prev_list[-1] = vec_t
1072
- # We do not need to evaluate the final model value.
1073
- if step < steps:
1074
- model_prev_list[-1] = self.model_fn(x, vec_t)
1075
- elif method in ['singlestep', 'singlestep_fixed']:
1076
- if method == 'singlestep':
1077
- timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
1078
- skip_type=skip_type,
1079
- t_T=t_T, t_0=t_0,
1080
- device=device)
1081
- elif method == 'singlestep_fixed':
1082
- K = steps // order
1083
- orders = [order, ] * K
1084
- timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
1085
- for i, order in enumerate(orders):
1086
- t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
1087
- timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
1088
- N=order, device=device)
1089
- lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
1090
- vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
1091
- h = lambda_inner[-1] - lambda_inner[0]
1092
- r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
1093
- r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
1094
- x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
1095
- if denoise_to_zero:
1096
- x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
1097
- return x
1098
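Putting the three pieces together, an end-to-end sketch (all names except the classes and functions above are hypothetical placeholders) for 20-NFE multistep DPM-Solver++ sampling of a guided, discrete-time DPM, following the advice in the docstring:

```
ns = NoiseScheduleVP('discrete', betas=betas)
model_fn = model_wrapper(unet, ns, model_type="noise",
                         guidance_type="classifier-free",
                         condition=cond_emb,
                         unconditional_condition=uncond_emb,
                         guidance_scale=7.5)
dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True)   # DPM-Solver++
x_T = torch.randn(4, 3, 64, 64)                          # start from pure noise
x_0 = dpm_solver.sample(x_T, steps=20, order=2,
                        skip_type='time_uniform', method='multistep')
```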
-
1099
-
1100
- #############################################################
1101
- # other utility functions
1102
- #############################################################
1103
-
1104
- def interpolate_fn(x, xp, yp):
1105
- """
1106
- A piecewise linear function y = f(x), using xp and yp as keypoints.
1107
- We implement f(x) in a differentiable way (i.e. applicable for autograd).
1108
- The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
1109
- Args:
1110
- x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
1111
- xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
1112
- yp: PyTorch tensor with shape [C, K].
1113
- Returns:
1114
- The function values f(x), with shape [N, C].
1115
- """
1116
- N, K = x.shape[0], xp.shape[1]
1117
- all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
1118
- sorted_all_x, x_indices = torch.sort(all_x, dim=2)
1119
- x_idx = torch.argmin(x_indices, dim=2)
1120
- cand_start_idx = x_idx - 1
1121
- start_idx = torch.where(
1122
- torch.eq(x_idx, 0),
1123
- torch.tensor(1, device=x.device),
1124
- torch.where(
1125
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1126
- ),
1127
- )
1128
- end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
1129
- start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
1130
- end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
1131
- start_idx2 = torch.where(
1132
- torch.eq(x_idx, 0),
1133
- torch.tensor(0, device=x.device),
1134
- torch.where(
1135
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1136
- ),
1137
- )
1138
- y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
1139
- start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
1140
- end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
1141
- cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
1142
- return cand
1143
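A quick sketch of `interpolate_fn` (not from the file): with keypoints y = 2x on [0, 1], an in-range query is interpolated and an out-of-range query is extrapolated from the outermost segment:

```
xp = torch.linspace(0., 1., 5).unsqueeze(0)   # keypoints, shape [1, 5]
yp = 2. * xp                                  # y = 2x,    shape [1, 5]
x = torch.tensor([[0.25], [1.5]])             # queries,   shape [2, 1]
print(interpolate_fn(x, xp, yp))              # approx [[0.5], [3.0]]
```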
-
1144
-
1145
- def expand_dims(v, dims):
1146
- """
1147
- Expand the tensor `v` to the dim `dims`.
1148
- Args:
1149
- `v`: a PyTorch tensor with shape [N].
1150
- `dims`: an `int`, the target number of dimensions.
1151
- Returns:
1152
- a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
1153
- """
1154
- return v[(...,) + (None,) * (dims - 1)]
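For example (a sketch), broadcasting a per-sample scalar against an image batch [N, C, H, W]:

```
v = torch.ones(4)
assert expand_dims(v, dims=4).shape == torch.Size([4, 1, 1, 1])
```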
spaces/AISuperheroes/05GR-Image-To-Multilingual-OCR/app.py DELETED
@@ -1,54 +0,0 @@
-import pandas as pd
-import PIL
-from PIL import Image
-from PIL import ImageDraw
-import gradio as gr
-import torch
-import easyocr
-
-torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/english.png', 'english.png')
-torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/chinese.jpg', 'chinese.jpg')
-torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/japanese.jpg', 'japanese.jpg')
-torch.hub.download_url_to_file('https://i.imgur.com/mwQFd7G.jpeg', 'Hindi.jpeg')
-
-def draw_boxes(image, bounds, color='yellow', width=2):
-    draw = ImageDraw.Draw(image)
-    for bound in bounds:
-        p0, p1, p2, p3 = bound[0]
-        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
-    return image
-
-def inference(img, lang):
-    reader = easyocr.Reader(lang)
-    bounds = reader.readtext(img.name)
-    im = PIL.Image.open(img.name)
-    draw_boxes(im, bounds)
-    im.save('result.jpg')
-    return ['result.jpg', pd.DataFrame(bounds).iloc[:, 1:]]
-
-title = 'Image To Optical Character Recognition'
-description = 'Multilingual OCR which works conveniently on all devices in multiple languages.'
-article = "<p style='text-align: center'></p>"
-examples = [['english.png',['en']],['chinese.jpg',['ch_sim', 'en']],['japanese.jpg',['ja', 'en']],['Hindi.jpeg',['hi', 'en']]]
-css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
-choices = [
-    "ch_sim",
-    "ch_tra",
-    "de",
-    "en",
-    "es",
-    "ja",
-    "hi",
-    "ru"
-]
-gr.Interface(
-    inference,
-    [gr.inputs.Image(type='file', label='Input'), gr.inputs.CheckboxGroup(choices, type="value", default=['en'], label='language')],
-    [gr.outputs.Image(type='file', label='Output'), gr.outputs.Dataframe(headers=['text', 'confidence'])],
-    title=title,
-    description=description,
-    article=article,
-    examples=examples,
-    css=css,
-    enable_queue=True
-).launch(debug=True)
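
Stripped of the Gradio plumbing, the recognition step in the deleted Space above reduces to roughly the following sketch (the file name and language list are illustrative):

    import easyocr

    reader = easyocr.Reader(['en'])          # languages drawn from the choices list above
    bounds = reader.readtext('english.png')  # list of (box, text, confidence) triples
    for box, text, confidence in bounds:
        print(text, confidence)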
spaces/AP123/dreamgaussian/mesh_utils.py DELETED
@@ -1,147 +0,0 @@
-import numpy as np
-import pymeshlab as pml
-
-
-def poisson_mesh_reconstruction(points, normals=None):
-    # points/normals: [N, 3] np.ndarray
-
-    import open3d as o3d
-
-    pcd = o3d.geometry.PointCloud()
-    pcd.points = o3d.utility.Vector3dVector(points)
-
-    # outlier removal
-    pcd, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=10)
-
-    # normals
-    if normals is None:
-        pcd.estimate_normals()
-    else:
-        pcd.normals = o3d.utility.Vector3dVector(normals[ind])
-
-    # visualize
-    o3d.visualization.draw_geometries([pcd], point_show_normal=False)
-
-    mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
-        pcd, depth=9
-    )
-    vertices_to_remove = densities < np.quantile(densities, 0.1)
-    mesh.remove_vertices_by_mask(vertices_to_remove)
-
-    # visualize
-    o3d.visualization.draw_geometries([mesh])
-
-    vertices = np.asarray(mesh.vertices)
-    triangles = np.asarray(mesh.triangles)
-
-    print(
-        f"[INFO] poisson mesh reconstruction: {points.shape} --> {vertices.shape} / {triangles.shape}"
-    )
-
-    return vertices, triangles
-
-
-def decimate_mesh(
-    verts, faces, target, backend="pymeshlab", remesh=False, optimalplacement=True
-):
-    # optimalplacement: default is True, but for a flat mesh it must be turned False to prevent spike artifacts.
-
-    _ori_vert_shape = verts.shape
-    _ori_face_shape = faces.shape
-
-    if backend == "pyfqmr":
-        import pyfqmr
-
-        solver = pyfqmr.Simplify()
-        solver.setMesh(verts, faces)
-        solver.simplify_mesh(target_count=target, preserve_border=False, verbose=False)
-        verts, faces, normals = solver.getMesh()
-    else:
-        m = pml.Mesh(verts, faces)
-        ms = pml.MeshSet()
-        ms.add_mesh(m, "mesh")  # will copy!
-
-        # filters
-        # ms.meshing_decimation_clustering(threshold=pml.Percentage(1))
-        ms.meshing_decimation_quadric_edge_collapse(
-            targetfacenum=int(target), optimalplacement=optimalplacement
-        )
-
-        if remesh:
-            # ms.apply_coord_taubin_smoothing()
-            ms.meshing_isotropic_explicit_remeshing(
-                iterations=3, targetlen=pml.Percentage(1)
-            )
-
-        # extract mesh
-        m = ms.current_mesh()
-        verts = m.vertex_matrix()
-        faces = m.face_matrix()
-
-    print(
-        f"[INFO] mesh decimation: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}"
-    )
-
-    return verts, faces
-
-
-def clean_mesh(
-    verts,
-    faces,
-    v_pct=1,
-    min_f=64,
-    min_d=20,
-    repair=True,
-    remesh=True,
-    remesh_size=0.01,
-):
-    # verts: [N, 3]
-    # faces: [N, 3]
-
-    _ori_vert_shape = verts.shape
-    _ori_face_shape = faces.shape
-
-    m = pml.Mesh(verts, faces)
-    ms = pml.MeshSet()
-    ms.add_mesh(m, "mesh")  # will copy!
-
-    # filters
-    ms.meshing_remove_unreferenced_vertices()  # verts not refed by any faces
-
-    if v_pct > 0:
-        ms.meshing_merge_close_vertices(
-            threshold=pml.Percentage(v_pct)
-        )  # 1/10000 of bounding box diagonal
-
-    ms.meshing_remove_duplicate_faces()  # faces defined by the same verts
-    ms.meshing_remove_null_faces()  # faces with area == 0
-
-    if min_d > 0:
-        ms.meshing_remove_connected_component_by_diameter(
-            mincomponentdiag=pml.Percentage(min_d)
-        )
-
-    if min_f > 0:
-        ms.meshing_remove_connected_component_by_face_number(mincomponentsize=min_f)
-
-    if repair:
-        # ms.meshing_remove_t_vertices(method=0, threshold=40, repeat=True)
-        ms.meshing_repair_non_manifold_edges(method=0)
-        ms.meshing_repair_non_manifold_vertices(vertdispratio=0)
-
-    if remesh:
-        # ms.apply_coord_taubin_smoothing()
-        ms.meshing_isotropic_explicit_remeshing(
-            iterations=3, targetlen=pml.AbsoluteValue(remesh_size)
-        )
-
-    # extract mesh
-    m = ms.current_mesh()
-    verts = m.vertex_matrix()
-    faces = m.face_matrix()
-
-    print(
-        f"[INFO] mesh cleaning: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}"
-    )
-
-    return verts, faces
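
For reference, a minimal sketch of how the two pymeshlab-backed helpers above compose; the random arrays are placeholders for a real mesh, not data from the source:

    import numpy as np

    # placeholder geometry (e.g. the output of poisson_mesh_reconstruction above)
    verts = np.random.rand(1000, 3)
    faces = np.random.randint(0, 1000, (2000, 3))

    verts, faces = clean_mesh(verts, faces, remesh=False)   # drop tiny components, repair topology
    verts, faces = decimate_mesh(verts, faces, target=500)  # collapse edges down to ~500 faces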
spaces/AbandonedMuse/UnlimitedMusicGen/Makefile DELETED
@@ -1,21 +0,0 @@
-default: linter tests
-
-install:
-	pip install -U pip
-	pip install -U -e '.[dev]'
-
-linter:
-	flake8 audiocraft && mypy audiocraft
-	flake8 tests && mypy tests
-
-tests:
-	coverage run -m pytest tests
-	coverage report --include 'audiocraft/*'
-
-docs:
-	pdoc3 --html -o docs -f audiocraft
-
-dist:
-	python setup.py sdist
-
-.PHONY: linter tests docs dist
spaces/Abhilashvj/planogram-compliance/utils/__init__.py DELETED
@@ -1,88 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-utils/initialization
-"""
-
-import contextlib
-import platform
-import threading
-
-
-def emojis(str=""):
-    # Return platform-dependent emoji-safe version of string
-    return (
-        str.encode().decode("ascii", "ignore")
-        if platform.system() == "Windows"
-        else str
-    )
-
-
-class TryExcept(contextlib.ContextDecorator):
-    # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
-    def __init__(self, msg=""):
-        self.msg = msg
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, exc_type, value, traceback):
-        if value:
-            print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
-        return True
-
-
-def threaded(func):
-    # Multi-threads a target function and returns thread. Usage: @threaded decorator
-    def wrapper(*args, **kwargs):
-        thread = threading.Thread(
-            target=func, args=args, kwargs=kwargs, daemon=True
-        )
-        thread.start()
-        return thread
-
-    return wrapper
-
-
-def join_threads(verbose=False):
-    # Join all daemon threads, i.e. atexit.register(lambda: join_threads())
-    main_thread = threading.current_thread()
-    for t in threading.enumerate():
-        if t is not main_thread:
-            if verbose:
-                print(f"Joining thread {t.name}")
-            t.join()
-
-
-def notebook_init(verbose=True):
-    # Check system software and hardware
-    print("Checking setup...")
-
-    import os
-    import shutil
-
-    from utils.general import check_font, check_requirements, is_colab
-    from utils.torch_utils import select_device  # imports
-
-    check_font()
-
-    import psutil
-    from IPython import display  # to display images and clear console output
-
-    if is_colab():
-        shutil.rmtree(
-            "/content/sample_data", ignore_errors=True
-        )  # remove colab /sample_data directory
-
-    # System info
-    if verbose:
-        gb = 1 << 30  # bytes to GiB (1024 ** 3)
-        ram = psutil.virtual_memory().total
-        total, used, free = shutil.disk_usage("/")
-        display.clear_output()
-        s = f"({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)"
-    else:
-        s = ""
-
-    select_device(newline=False)
-    print(emojis(f"Setup complete ✅ {s}"))
-    return display
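
A short sketch of how the two decorators above behave (importing them as `utils` assumes this package sits on the path under that name):

    from utils import TryExcept, threaded

    @TryExcept('Download failed')
    def download():
        raise OSError('no network')

    download()  # prints "Download failed: no network" instead of raising,
                # because TryExcept.__exit__ returns True and suppresses the error

    @threaded
    def heavy_task(n):
        return sum(range(n))

    thread = heavy_task(10_000_000)  # returns immediately with the running daemon Thread
    thread.join()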
spaces/Aditya9790/yolo7-object-tracking/detect.py DELETED
@@ -1,163 +0,0 @@
-import argparse
-import time
-from pathlib import Path
-
-import cv2
-import torch
-import torch.backends.cudnn as cudnn
-from numpy import random
-
-from models.experimental import attempt_load
-from utils.datasets import LoadStreams, LoadImages
-from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
-    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
-from utils.plots import plot_one_box
-from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
-
-
-def detect(save_img=False):
-    source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
-    save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
-    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
-        ('rtsp://', 'rtmp://', 'http://', 'https://'))
-
-    # Directories
-    save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
-    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
-
-    # Initialize
-    set_logging()
-    device = select_device(opt.device)
-    half = device.type != 'cpu'  # half precision only supported on CUDA
-
-    # Load model
-    model = attempt_load(weights, map_location=device)  # load FP32 model
-    stride = int(model.stride.max())  # model stride
-    imgsz = check_img_size(imgsz, s=stride)  # check img_size
-
-    if trace:
-        model = TracedModel(model, device, opt.img_size)
-
-    if half:
-        model.half()  # to FP16
-
-    # Second-stage classifier
-    classify = False
-    if classify:
-        modelc = load_classifier(name='resnet101', n=2)  # initialize
-        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
-
-    # Set Dataloader
-    vid_path, vid_writer = None, None
-    if webcam:
-        view_img = check_imshow()
-        cudnn.benchmark = True  # set True to speed up constant image size inference
-        dataset = LoadStreams(source, img_size=imgsz, stride=stride)
-    else:
-        dataset = LoadImages(source, img_size=imgsz, stride=stride)
-
-    # Get names and colors
-    names = model.module.names if hasattr(model, 'module') else model.names
-    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
-
-    # Run inference
-    if device.type != 'cpu':
-        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
-    old_img_w = old_img_h = imgsz
-    old_img_b = 1
-
-    t0 = time.time()
-    for path, img, im0s, vid_cap in dataset:
-        img = torch.from_numpy(img).to(device)
-        img = img.half() if half else img.float()  # uint8 to fp16/32
-        img /= 255.0  # 0 - 255 to 0.0 - 1.0
-        if img.ndimension() == 3:
-            img = img.unsqueeze(0)
-
-        # Warmup
-        if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
-            old_img_b = img.shape[0]
-            old_img_h = img.shape[2]
-            old_img_w = img.shape[3]
-            for i in range(3):
-                model(img, augment=opt.augment)[0]
-
-        # Inference
-        t1 = time_synchronized()
-        with torch.no_grad():  # Calculating gradients would cause a GPU memory leak
-            pred = model(img, augment=opt.augment)[0]
-        t2 = time_synchronized()
-
-        # Apply NMS
-        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
-        t3 = time_synchronized()
-
-        # Apply Classifier
-        if classify:
-            pred = apply_classifier(pred, modelc, img, im0s)
-
-        # Process detections
-        for i, det in enumerate(pred):  # detections per image
-            if webcam:  # batch_size >= 1
-                p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
-            else:
-                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
-
-            p = Path(p)  # to Path
-            save_path = str(save_dir / p.name)  # img.jpg
-            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
-            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
-            if len(det):
-                # Rescale boxes from img_size to im0 size
-                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
-
-                # Print results
-                for c in det[:, -1].unique():
-                    n = (det[:, -1] == c).sum()  # detections per class
-                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
-
-                # Write results
-                for *xyxy, conf, cls in reversed(det):
-                    if save_txt:  # Write to file
-                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
-                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
-                        with open(txt_path + '.txt', 'a') as f:
-                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
-
-                    if save_img or view_img:  # Add bbox to image
-                        label = f'{names[int(cls)]} {conf:.2f}'
-                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)
-
-            # Print time (inference + NMS)
-            print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
-
-            # Stream results
-            if view_img:
-                cv2.imshow(str(p), im0)
-                cv2.waitKey(1)  # 1 millisecond
-
-            # Save results (image with detections)
-            if save_img:
-                if dataset.mode == 'image':
-                    cv2.imwrite(save_path, im0)
-                    print(f" The image with the result is saved in: {save_path}")
-                else:  # 'video' or 'stream'
-                    if vid_path != save_path:  # new video
-                        vid_path = save_path
-                        if isinstance(vid_writer, cv2.VideoWriter):
-                            vid_writer.release()  # release previous video writer
-                        if vid_cap:  # video
-                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
-                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-                        else:  # stream
-                            fps, w, h = 30, im0.shape[1], im0.shape[0]
-                            save_path += '.mp4'
-                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
-                    vid_writer.write(im0)
-
-    if save_txt or save_img:
-        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
-        #print(f"Results saved to {save_dir}{s}")
-
-    print(f'Done. ({time.time() - t0:.3f}s)')
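
Note that this copy of detect.py really does end at line 163: it reads a module-level `opt` that is never defined here (the upstream YOLOv7 script builds `opt` with argparse under an `if __name__ == '__main__':` guard, which this file omits). A hypothetical way to drive `detect()` directly, with the attribute names taken from the references inside the function:

    from types import SimpleNamespace
    import detect

    # illustrative values; every attribute name below is read somewhere in detect()
    detect.opt = SimpleNamespace(
        source='inference/images', weights='yolov7.pt', view_img=False, save_txt=False,
        img_size=640, no_trace=False, nosave=False, project='runs/detect', name='exp',
        exist_ok=False, device='', augment=False, conf_thres=0.25, iou_thres=0.45,
        classes=None, agnostic_nms=False, save_conf=False)
    detect.detect()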
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dialog/methods/ButtonMethods.js DELETED
@@ -1,333 +0,0 @@
-export default {
-    getChoice(index) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            return choicesSizer.getButton(index);
-        } else {
-            return undefined;
-        }
-    },
-
-    getAction(index) {
-        return this.childrenMap.actionsSizer.getButton(index);
-    },
-
-    getToolbar(index) {
-        return this.childrenMap.toolbarSizer.getButton(index);
-    },
-
-    getLeftToolbar(index) {
-        return this.childrenMap.leftToolbarSizer.getButton(index);
-    },
-
-    setChoiceEnable(index, enabled) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.setButtonEnable(index, enabled);
-        }
-        return this;
-    },
-
-    setActionEnable(index, enabled) {
-        this.childrenMap.actionsSizer.setButtonEnable(index, enabled);
-        return this;
-    },
-
-    setToolbarEnable(index, enabled) {
-        this.childrenMap.toolbarSizer.setButtonEnable(index, enabled);
-        return this;
-    },
-
-    setLeftToolbarEnable(index, enabled) {
-        this.childrenMap.leftToolbarSizer.setButtonEnable(index, enabled);
-        return this;
-    },
-
-    toggleChoiceEnable(index) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.toggleButtonEnable(index);
-        }
-        return this;
-    },
-
-    toggleActionEnable(index) {
-        this.childrenMap.actionsSizer.toggleButtonEnable(index);
-        return this;
-    },
-
-    toggleToolbarEnable(index) {
-        this.childrenMap.toolbarSizer.toggleButtonEnable(index);
-        return this;
-    },
-
-    toggleLeftToolbarEnable(index) {
-        this.childrenMap.leftToolbarSizer.toggleButtonEnable(index);
-        return this;
-    },
-
-    getChoiceEnable(index) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            return choicesSizer.getButtonEnable(index);
-        } else {
-            return false;
-        }
-    },
-
-    getActionEnable(index) {
-        return this.childrenMap.actionsSizer.getButtonEnable(index);
-    },
-
-    getToolbarEnable(index) {
-        return this.childrenMap.toolbarSizer.getButtonEnable(index);
-    },
-
-    getLeftToolbarEnable(index) {
-        return this.childrenMap.leftToolbarSizer.getButtonEnable(index);
-    },
-
-    emitChoiceClick(index) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.emitButtonClick(index);
-        }
-        return this;
-    },
-
-    emitActionClick(index) {
-        this.childrenMap.actionsSizer.emitButtonClick(index);
-        return this;
-    },
-
-    emitToolbarClick(index) {
-        this.childrenMap.toolbarSizer.emitButtonClick(index);
-        return this;
-    },
-
-    emitLeftToolbarClick(index) {
-        this.childrenMap.leftToolbarSizer.emitButtonClick(index);
-        return this;
-    },
-
-    showChoice(index) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.showButton(index);
-        }
-        return this;
-    },
-
-    showAction(index) {
-        this.childrenMap.actionsSizer.showButton(index);
-        return this;
-    },
-
-    showToolbar(index) {
-        this.childrenMap.toolbarSizer.showButton(index);
-        return this;
-    },
-
-    showLeftToolbar(index) {
-        this.childrenMap.leftToolbarSizer.showButton(index);
-        return this;
-    },
-
-    hideChoice(index) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.hideButton(index);
-        }
-        return this;
-    },
-
-    hideAction(index) {
-        this.childrenMap.actionsSizer.hideButton(index);
-        return this;
-    },
-
-    hideToolbar(index) {
-        this.childrenMap.toolbarSizer.hideButton(index);
-        return this;
-    },
-
-    hideLeftToolbar(index) {
-        this.childrenMap.leftToolbarSizer.hideButton(index);
-        return this;
-    },
-
-    addChoice(gameObject) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.addButton(gameObject);
-        }
-        return this;
-    },
-
-    addAction(gameObject) {
-        this.childrenMap.actionsSizer.addButton(gameObject);
-        return this;
-    },
-
-    addToolbar(gameObject) {
-        this.childrenMap.toolbarSizer.addButton(gameObject);
-        return this;
-    },
-
-    addLeftToolbar(gameObject) {
-        this.childrenMap.leftToolbarSizer.addButton(gameObject);
-        return this;
-    },
-
-    removeChoice(index, destroyChild) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.removeButton(index, destroyChild);
-        }
-        return this;
-    },
-
-    removeAction(index, destroyChild) {
-        this.childrenMap.actionsSizer.removeButton(index, destroyChild);
-        return this;
-    },
-
-    removeToolbar(index, destroyChild) {
-        this.childrenMap.toolbarSizer.removeButton(index, destroyChild);
-        return this;
-    },
-
-    removeLeftToolbar(index, destroyChild) {
-        this.childrenMap.leftToolbarSizer.removeButton(index, destroyChild);
-        return this;
-    },
-
-    clearChoices(destroyChild) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.clearButtons(destroyChild);
-        }
-        return this;
-    },
-
-    clearActions(destroyChild) {
-        this.childrenMap.actionsSizer.clearButtons(destroyChild);
-        return this;
-    },
-
-    clearToolbar(destroyChild) {
-        this.childrenMap.toolbarSizer.clearButtons(destroyChild);
-        return this;
-    },
-
-    clearLeftToolbar(destroyChild) {
-        this.childrenMap.leftToolbarSizer.clearButtons(destroyChild);
-        return this;
-    },
-
-    forEachChoice(callback, scope) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.forEachButtton(callback, scope);
-        }
-        return this;
-    },
-
-    forEachAction(callback, scope) {
-        this.childrenMap.actionsSizer.forEachButtton(callback, scope);
-        return this;
-    },
-
-    forEachToolbar(callback, scope) {
-        this.childrenMap.toolbarSizer.forEachButtton(callback, scope);
-        return this;
-    },
-
-    forEachLeftToolbar(callback, scope) {
-        this.childrenMap.leftToolbarSizer.forEachButtton(callback, scope);
-        return this;
-    },
-
-    setAllButtonsEnable(enabled) {
-        if (enabled === undefined) {
-            enabled = true;
-        }
-
-        if (this.childrenMap.toolbarSizer) {
-            this.setToolbarEnable(enabled);
-        }
-        if (this.childrenMap.leftToolbarSizer) {
-            this.setLeftToolbarEnable(enabled);
-        }
-        if (this.childrenMap.actionsSizer) {
-            this.setActionEnable(enabled);
-        }
-        if (this.childrenMap.choicesSizer) {
-            this.setChoiceEnable(enabled);
-        }
-
-        return this;
-    },
-
-    // Checkboxes
-    getChoicesButtonStates() {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            return choicesSizer.getAllButtonsState();
-        } else {
-            return {};
-        }
-    },
-
-    getChoicesButtonState(name) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (name === undefined) {
-            if (choicesSizer) {
-                return choicesSizer.getAllButtonsState();
-            } else {
-                return {}
-            }
-        } else {
-            if (choicesSizer) {
-                return choicesSizer.getButtonState(name);
-            } else {
-                return false;
-            }
-        }
-    },
-
-    setChoicesButtonState(name, state) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.setButtonState(name, state);
-        }
-        return this;
-    },
-
-    clearChoicesButtonStates() {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.clearAllButtonsState();
-        }
-        return this;
-    },
-
-    // Radio buttons
-    getChoicesSelectedButtonName() {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            return choicesSizer.getSelectedButtonName();
-        } else {
-            return '';
-        }
-    },
-
-    setChoicesSelectedButtonName(name) {
-        var choicesSizer = this.childrenMap.choicesSizer;
-        if (choicesSizer) {
-            choicesSizer.setSelectedButtonName(name);
-        }
-        return this;
-    },
-
-};
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/methods/ConfigurationMethods.js DELETED
@@ -1,37 +0,0 @@
-import ScaleMethods from '../../basesizer/ScaleMethods.js';
-
-var DefaultExpandCallback = function (gameObject, duration) {
-    ScaleMethods.popUp.call(gameObject, duration, this.expandDirection);
-};
-
-var DefaultCollapseCallback = function (gameObject, duration) {
-    ScaleMethods.scaleDown.call(gameObject, duration, this.expandDirection)
-}
-
-export default {
-    setTransitionDuration(duration) {
-        this.transitionDuration = duration;
-
-        this.childTransition
-            .setTransitInTime(duration)
-            .setTransitOutTime(duration);
-
-        return this;
-    },
-
-    setExpandCallback(callback) {
-        if (callback === undefined) {
-            callback = DefaultExpandCallback.bind(this);
-        }
-        this.childTransition.setTransitInCallback(callback);
-        return this;
-    },
-
-    setCollapseCallback(callback) {
-        if (callback === undefined) {
-            callback = DefaultCollapseCallback.bind(this);
-        }
-        this.childTransition.setTransitOutCallback(callback);
-        return this;
-    }
-}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/index.js DELETED
@@ -1,12 +0,0 @@
-import Maker from './Maker.js';
-import Make from './Make.js';
-import YAMLMake from './YAMLMake.js';
-import Builders from './builders/Builders.js';
-
-
-export {
-    Maker,
-    Make,
-    YAMLMake,
-    Builders,
-}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/ResolveWidth.js DELETED
@@ -1,23 +0,0 @@
-import ResolveWidthBase from '../basesizer/ResolveWidth.js';
-
-var ResolveWidth = function (width) {
-    var width = ResolveWidthBase.call(this, width);
-
-    // Calculate proportionLength
-    if ((this.proportionLength === undefined) && (this.orientation === 0)) {
-        var remainder = width - this.childrenWidth;
-        if (remainder > 0) {
-            remainder = width - this.getChildrenWidth(false);
-            this.proportionLength = remainder / this.childrenProportion;
-        } else {
-            this.proportionLength = 0;
-            if (remainder < 0) {
-                // Warning
-            }
-        }
-    }
-
-    return width;
-}
-
-export default ResolveWidth;
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/video2audio.py DELETED
@@ -1,27 +0,0 @@
-import os
-from concurrent.futures import ThreadPoolExecutor
-
-from moviepy.editor import AudioFileClip
-
-video_dir = "./video_data/"
-audio_dir = "./raw_audio/"
-filelist = list(os.walk(video_dir))[0][2]
-
-
-def generate_infos():
-    videos = []
-    for file in filelist:
-        if file.endswith(".mp4"):
-            videos.append(file)
-    return videos
-
-
-def clip_file(file):
-    my_audio_clip = AudioFileClip(video_dir + file)
-    my_audio_clip.write_audiofile(audio_dir + file.rstrip(".mp4") + ".wav")
-
-
-if __name__ == "__main__":
-    infos = generate_infos()
-    with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
-        executor.map(clip_file, infos)
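
One caveat worth flagging in the file above: `file.rstrip(".mp4")` strips any trailing run of the characters `.`, `m`, `p`, `4`, not the literal suffix, so a name like `clip4.mp4` would lose part of its stem. A safer drop-in variant of `clip_file` for the same module (it relies on the module-level `video_dir`, `audio_dir`, and `AudioFileClip` already defined there):

    import os

    def clip_file_safe(file):
        stem, _ = os.path.splitext(file)  # removes the extension instead of rstrip-ing characters
        my_audio_clip = AudioFileClip(video_dir + file)
        my_audio_clip.write_audiofile(audio_dir + stem + ".wav")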
spaces/Alfaxad/BioGalacticModels/app.py DELETED
@@ -1,114 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import gradio as gr
-
-from model_list import ModelList
-
-DESCRIPTION = '# Explore Biology & Biochemistry Foundation Models 🧬'
-NOTES = '''
-Thanks to the following folks who have made suggestions to this list!
-- [Shelby](https://twitter.com/shelbynewsad), author of [this nice model list](https://compoundvc.notion.site/compoundvc/474885e638e94e44a1aab4d3124e3d6a?v=299bce7af785413da4c9f36837c03aaf)
-- [Valentyn Bezshapkin](https://twitter.com/valentynbez)
-- [Payel Das](https://twitter.com/payel791)
-- [Anthony Costa](https://twitter.com/anthonycosta)
-'''
-FOOTER = ''''''
-
-def main():
-    model_list = ModelList()
-
-    with gr.Blocks(css='style.css') as demo:
-        gr.Markdown(DESCRIPTION)
-
-        search_box = gr.Textbox(
-            label='Search Model Name',
-            placeholder=
-            'You can search for titles with regular expressions. e.g. (?<!sur)face',
-            max_lines=1)
-
-        case_sensitive = gr.Checkbox(label='Case Sensitive')
-
-        filter_names = gr.CheckboxGroup(choices=[
-            'Paper',
-            'Code',
-            'Model Weights',
-        ], label='Filter')
-
-        data_type_names = [
-            'DNA', 'scRNA', 'mRNA', 'scRNA perturbation', 'RNA structure prediction', 'RNA language model', 'protein language model', 'protein structure prediction',
-            'protein generation', 'protein function prediction', 'protein fitness prediction', 'antibody structure prediction', 'antibody language model', 'molecules',
-            'ligand generation', 'reaction-to-enzyme', 'enzyme generation', 'epigenomic', 'molecular docking', 'peptide property prediction',
-        ]
-
-        data_types = gr.CheckboxGroup(choices=data_type_names,
-                                      value=data_type_names,
-                                      label='Type')
-
-        years = ['2020', '2021', '2022', '2023']
-
-        years_checkbox = gr.CheckboxGroup(choices=years, value=years, label='Year of Publication/Preprint')
-
-        # model_type_names = [
-        #     'GPT2', 'GPT-Neo', 'GPT-NeoX', 'ESM', 'BERT', 'RoBERTa', 'BART', 'T5', 'MPNN', 'diffusion', 'custom model'
-        # ]
-
-        # model_types = gr.CheckboxGroup(choices=model_type_names,
-        #                                value=model_type_names,
-        #                                label='Base Model')
-
-        search_button = gr.Button('Search')
-
-        number_of_models = gr.Textbox(label='Number of Models Found')
-        table = gr.HTML(show_label=False)
-
-        gr.Markdown(NOTES)
-        gr.Markdown(FOOTER)
-
-        demo.load(fn=model_list.render,
-                  inputs=[
-                      search_box,
-                      case_sensitive,
-                      filter_names,
-                      data_types,
-                      years_checkbox,
-                      # model_types
-                  ],
-                  outputs=[
-                      number_of_models,
-                      table,
-                  ])
-        search_box.submit(fn=model_list.render,
-                          inputs=[
-                              search_box,
-                              case_sensitive,
-                              filter_names,
-                              data_types,
-                              years_checkbox,
-                              # model_types
-                          ],
-                          outputs=[
-                              number_of_models,
-                              table,
-                          ])
-
-        search_button.click(fn=model_list.render,
-                            inputs=[
-                                search_box,
-                                case_sensitive,
-                                filter_names,
-                                data_types,
-                                years_checkbox,
-                                # model_types
-                            ],
-                            outputs=[
-                                number_of_models,
-                                table,
-                            ])
-
-    demo.launch(enable_queue=True, share=False)
-
-
-if __name__ == '__main__':
-    main()
spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_inference.py DELETED
@@ -1,36 +0,0 @@
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-import IPython.display as ipd
-import torch
-import commons
-import utils
-import ONNXVITS_infer
-from text import text_to_sequence
-
-def get_text(text, hps):
-    text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
-    if hps.data.add_blank:
-        text_norm = commons.intersperse(text_norm, 0)
-    text_norm = torch.LongTensor(text_norm)
-    return text_norm
-
-hps = utils.get_hparams_from_file("../vits/pretrained_models/uma87.json")
-
-net_g = ONNXVITS_infer.SynthesizerTrn(
-    len(hps.symbols),
-    hps.data.filter_length // 2 + 1,
-    hps.train.segment_size // hps.data.hop_length,
-    n_speakers=hps.data.n_speakers,
-    **hps.model)
-_ = net_g.eval()
-
-_ = utils.load_checkpoint("../vits/pretrained_models/uma_1153000.pth", net_g)
-
-text1 = get_text("おはようございます。", hps)
-stn_tst = text1
-with torch.no_grad():
-    x_tst = stn_tst.unsqueeze(0)
-    x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
-    sid = torch.LongTensor([0])
-    audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0, 0].data.cpu().float().numpy()
-print(audio)
spaces/Amrrs/DragGan-Inversion/torch_utils/ops/upfirdn2d.py DELETED
@@ -1,412 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Custom PyTorch ops for efficient resampling of 2D images."""
-
-import os
-import numpy as np
-import torch
-
-from .. import custom_ops
-from .. import misc
-from . import conv2d_gradfix
-
-# ----------------------------------------------------------------------------
-
-_plugin = None
-
-
-def _init():
-    global _plugin
-    if _plugin is None:
-        _plugin = custom_ops.get_plugin(
-            module_name='upfirdn2d_plugin',
-            sources=['upfirdn2d.cpp', 'upfirdn2d.cu'],
-            headers=['upfirdn2d.h'],
-            source_dir=os.path.dirname(__file__),
-            extra_cuda_cflags=['--use_fast_math',
-                               '--allow-unsupported-compiler'],
-        )
-    return True
-
-
-def _parse_scaling(scaling):
-    if isinstance(scaling, int):
-        scaling = [scaling, scaling]
-    assert isinstance(scaling, (list, tuple))
-    assert all(isinstance(x, int) for x in scaling)
-    sx, sy = scaling
-    assert sx >= 1 and sy >= 1
-    return sx, sy
-
-
-def _parse_padding(padding):
-    if isinstance(padding, int):
-        padding = [padding, padding]
-    assert isinstance(padding, (list, tuple))
-    assert all(isinstance(x, int) for x in padding)
-    if len(padding) == 2:
-        padx, pady = padding
-        padding = [padx, padx, pady, pady]
-    padx0, padx1, pady0, pady1 = padding
-    return padx0, padx1, pady0, pady1
-
-
-def _get_filter_size(f):
-    if f is None:
-        return 1, 1
-    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
-    fw = f.shape[-1]
-    fh = f.shape[0]
-    with misc.suppress_tracer_warnings():
-        fw = int(fw)
-        fh = int(fh)
-    misc.assert_shape(f, [fh, fw][:f.ndim])
-    assert fw >= 1 and fh >= 1
-    return fw, fh
-
-# ----------------------------------------------------------------------------
-
-
-def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
-    r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.
-
-    Args:
-        f:           Torch tensor, numpy array, or python list of the shape
-                     `[filter_height, filter_width]` (non-separable),
-                     `[filter_taps]` (separable),
-                     `[]` (impulse), or
-                     `None` (identity).
-        device:      Result device (default: cpu).
-        normalize:   Normalize the filter so that it retains the magnitude
-                     for constant input signal (DC)? (default: True).
-        flip_filter: Flip the filter? (default: False).
-        gain:        Overall scaling factor for signal magnitude (default: 1).
-        separable:   Return a separable filter? (default: select automatically).
-
-    Returns:
-        Float32 tensor of the shape
-        `[filter_height, filter_width]` (non-separable) or
-        `[filter_taps]` (separable).
-    """
-    # Validate.
-    if f is None:
-        f = 1
-    f = torch.as_tensor(f, dtype=torch.float32)
-    assert f.ndim in [0, 1, 2]
-    assert f.numel() > 0
-    if f.ndim == 0:
-        f = f[np.newaxis]
-
-    # Separable?
-    if separable is None:
-        separable = (f.ndim == 1 and f.numel() >= 8)
-    if f.ndim == 1 and not separable:
-        f = f.ger(f)
-    assert f.ndim == (1 if separable else 2)
-
-    # Apply normalize, flip, gain, and device.
-    if normalize:
-        f /= f.sum()
-    if flip_filter:
-        f = f.flip(list(range(f.ndim)))
-    f = f * (gain ** (f.ndim / 2))
-    f = f.to(device=device)
-    return f
-
-# ----------------------------------------------------------------------------
-
-
-def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
-    r"""Pad, upsample, filter, and downsample a batch of 2D images.
-
-    Performs the following sequence of operations for each channel:
-
-    1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
-
-    2. Pad the image with the specified number of zeros on each side (`padding`).
-       Negative padding corresponds to cropping the image.
-
-    3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
-       so that the footprint of all output pixels lies within the input image.
-
-    4. Downsample the image by keeping every Nth pixel (`down`).
-
-    This sequence of operations bears close resemblance to scipy.signal.upfirdn().
-    The fused op is considerably more efficient than performing the same calculation
-    using standard PyTorch ops. It supports gradients of arbitrary order.
-
-    Args:
-        x:           Float32/float64/float16 input tensor of the shape
-                     `[batch_size, num_channels, in_height, in_width]`.
-        f:           Float32 FIR filter of the shape
-                     `[filter_height, filter_width]` (non-separable),
-                     `[filter_taps]` (separable), or
-                     `None` (identity).
-        up:          Integer upsampling factor. Can be a single int or a list/tuple
-                     `[x, y]` (default: 1).
-        down:        Integer downsampling factor. Can be a single int or a list/tuple
-                     `[x, y]` (default: 1).
-        padding:     Padding with respect to the upsampled image. Can be a single number
-                     or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
-                     (default: 0).
-        flip_filter: False = convolution, True = correlation (default: False).
-        gain:        Overall scaling factor for signal magnitude (default: 1).
-        impl:        Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
-    Returns:
-        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
-    """
-    assert isinstance(x, torch.Tensor)
-    assert impl in ['ref', 'cuda']
-    if impl == 'cuda' and x.device.type == 'cuda' and _init():
-        return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)
-    return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
-
-# ----------------------------------------------------------------------------
-
-
-@misc.profiled_function
-def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
-    """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.
-    """
-    # Validate arguments.
-    assert isinstance(x, torch.Tensor) and x.ndim == 4
-    if f is None:
-        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
-    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
-    assert f.dtype == torch.float32 and not f.requires_grad
-    batch_size, num_channels, in_height, in_width = x.shape
-    upx, upy = _parse_scaling(up)
-    downx, downy = _parse_scaling(down)
-    padx0, padx1, pady0, pady1 = _parse_padding(padding)
-
-    # Check that upsampled buffer is not smaller than the filter.
-    upW = in_width * upx + padx0 + padx1
-    upH = in_height * upy + pady0 + pady1
-    assert upW >= f.shape[-1] and upH >= f.shape[0]
-
-    # Upsample by inserting zeros.
-    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
-    x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
-    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
-
-    # Pad or crop.
-    x = torch.nn.functional.pad(
-        x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
-    x = x[:, :, max(-pady0, 0): x.shape[2] - max(-pady1, 0),
-          max(-padx0, 0): x.shape[3] - max(-padx1, 0)]
-
-    # Setup filter.
-    f = f * (gain ** (f.ndim / 2))
-    f = f.to(x.dtype)
-    if not flip_filter:
-        f = f.flip(list(range(f.ndim)))
-
-    # Convolve with the filter.
-    f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
-    if f.ndim == 4:
-        x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)
-    else:
-        x = conv2d_gradfix.conv2d(
-            input=x, weight=f.unsqueeze(2), groups=num_channels)
-        x = conv2d_gradfix.conv2d(
-            input=x, weight=f.unsqueeze(3), groups=num_channels)
-
-    # Downsample by throwing away pixels.
-    x = x[:, :, ::downy, ::downx]
-    return x
-
-# ----------------------------------------------------------------------------
-
-
-_upfirdn2d_cuda_cache = dict()
-
-
-def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
-    """Fast CUDA implementation of `upfirdn2d()` using custom ops.
-    """
-    # Parse arguments.
-    upx, upy = _parse_scaling(up)
-    downx, downy = _parse_scaling(down)
-    padx0, padx1, pady0, pady1 = _parse_padding(padding)
-
-    # Lookup from cache.
-    key = (upx, upy, downx, downy, padx0, padx1,
-           pady0, pady1, flip_filter, gain)
-    if key in _upfirdn2d_cuda_cache:
-        return _upfirdn2d_cuda_cache[key]
-
-    # Forward op.
-    class Upfirdn2dCuda(torch.autograd.Function):
-        @staticmethod
-        def forward(ctx, x, f):  # pylint: disable=arguments-differ
-            assert isinstance(x, torch.Tensor) and x.ndim == 4
-            if f is None:
-                f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
-            if f.ndim == 1 and f.shape[0] == 1:
-                # Convert separable-1 into full-1x1.
-                f = f.square().unsqueeze(0)
-            assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
-            y = x
-            if f.ndim == 2:
-                y = _plugin.upfirdn2d(
-                    y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
-            else:
-                y = _plugin.upfirdn2d(
-                    y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0)
-                y = _plugin.upfirdn2d(
-                    y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain)
-            ctx.save_for_backward(f)
-            ctx.x_shape = x.shape
-            return y
-
-        @staticmethod
-        def backward(ctx, dy):  # pylint: disable=arguments-differ
-            f, = ctx.saved_tensors
-            _, _, ih, iw = ctx.x_shape
-            _, _, oh, ow = dy.shape
-            fw, fh = _get_filter_size(f)
-            p = [
-                fw - padx0 - 1,
-                iw * upx - ow * downx + padx0 - upx + 1,
-                fh - pady0 - 1,
-                ih * upy - oh * downy + pady0 - upy + 1,
-            ]
-            dx = None
-            df = None
-
-            if ctx.needs_input_grad[0]:
-                dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(
-                    not flip_filter), gain=gain).apply(dy, f)
-
-            assert not ctx.needs_input_grad[1]
-            return dx, df
-
-    # Add to cache.
-    _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
-    return Upfirdn2dCuda
-
-# ----------------------------------------------------------------------------
-
-
-def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
-    r"""Filter a batch of 2D images using the given 2D FIR filter.
-
-    By default, the result is padded so that its shape matches the input.
-    User-specified padding is applied on top of that, with negative values
-    indicating cropping. Pixels outside the image are assumed to be zero.
-
-    Args:
-        x:           Float32/float64/float16 input tensor of the shape
-                     `[batch_size, num_channels, in_height, in_width]`.
-        f:           Float32 FIR filter of the shape
-                     `[filter_height, filter_width]` (non-separable),
-                     `[filter_taps]` (separable), or
-                     `None` (identity).
-        padding:     Padding with respect to the output. Can be a single number or a
-                     list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
-                     (default: 0).
-        flip_filter: False = convolution, True = correlation (default: False).
-        gain:        Overall scaling factor for signal magnitude (default: 1).
-        impl:        Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
-    Returns:
-        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
-    """
-    padx0, padx1, pady0, pady1 = _parse_padding(padding)
-    fw, fh = _get_filter_size(f)
-    p = [
-        padx0 + fw // 2,
-        padx1 + (fw - 1) // 2,
-        pady0 + fh // 2,
-        pady1 + (fh - 1) // 2,
-    ]
-    return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
-
-# ----------------------------------------------------------------------------
-
-
-def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
-    r"""Upsample a batch of 2D images using the given 2D FIR filter.
-
-    By default, the result is padded so that its shape is a multiple of the input.
-    User-specified padding is applied on top of that, with negative values
-    indicating cropping. Pixels outside the image are assumed to be zero.
-
-    Args:
-        x:           Float32/float64/float16 input tensor of the shape
-                     `[batch_size, num_channels, in_height, in_width]`.
-        f:           Float32 FIR filter of the shape
-                     `[filter_height, filter_width]` (non-separable),
-                     `[filter_taps]` (separable), or
-                     `None` (identity).
-        up:          Integer upsampling factor. Can be a single int or a list/tuple
-                     `[x, y]` (default: 1).
-        padding:     Padding with respect to the output. Can be a single number or a
-                     list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
-                     (default: 0).
-        flip_filter: False = convolution, True = correlation (default: False).
-        gain:        Overall scaling factor for signal magnitude (default: 1).
-        impl:        Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
-    Returns:
-        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
-    """
-    upx, upy = _parse_scaling(up)
-    padx0, padx1, pady0, pady1 = _parse_padding(padding)
-    fw, fh = _get_filter_size(f)
-    p = [
-        padx0 + (fw + upx - 1) // 2,
-        padx1 + (fw - upx) // 2,
-        pady0 + (fh + upy - 1) // 2,
-        pady1 + (fh - upy) // 2,
-    ]
-    return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl)
-
-# ----------------------------------------------------------------------------
-
-
-def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
-    r"""Downsample a batch of 2D images using the given 2D FIR filter.
-
-    By default, the result is padded so that its shape is a fraction of the input.
-    User-specified padding is applied on top of that, with negative values
-    indicating cropping. Pixels outside the image are assumed to be zero.
-
-    Args:
-        x:           Float32/float64/float16 input tensor of the shape
-                     `[batch_size, num_channels, in_height, in_width]`.
-        f:           Float32 FIR filter of the shape
-                     `[filter_height, filter_width]` (non-separable),
-                     `[filter_taps]` (separable), or
-                     `None` (identity).
-        down:        Integer downsampling factor. Can be a single int or a list/tuple
-                     `[x, y]` (default: 1).
-        padding:     Padding with respect to the input. Can be a single number or a
-                     list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
-                     (default: 0).
-        flip_filter: False = convolution, True = correlation (default: False).
-        gain:        Overall scaling factor for signal magnitude (default: 1).
-        impl:        Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
-    Returns:
-        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
-    """
-    downx, downy = _parse_scaling(down)
-    padx0, padx1, pady0, pady1 = _parse_padding(padding)
-    fw, fh = _get_filter_size(f)
-    p = [
-        padx0 + (fw - downx + 1) // 2,
-        padx1 + (fw - downx) // 2,
-        pady0 + (fh - downy + 1) // 2,
-        pady1 + (fh - downy) // 2,
-    ]
-    return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
-
-# ----------------------------------------------------------------------------
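
A minimal usage sketch of the public helpers above, driven through the pure-PyTorch reference path so the CUDA plugin never needs to build (this assumes the module's sibling imports, `misc` and `conv2d_gradfix`, resolve as in the repo):

    import torch

    x = torch.randn(1, 3, 64, 64)
    f = setup_filter([1, 3, 3, 1])  # 4-tap binomial FIR filter, normalized to unit DC gain
                                    # (expanded to 2D, since fewer than 8 taps is non-separable)
    y_up = upsample2d(x, f, up=2, impl='ref')      # -> [1, 3, 128, 128]
    y_down = downsample2d(x, f, down=2, impl='ref')  # -> [1, 3, 32, 32]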
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/decoder/__init__.py DELETED
@@ -1,6 +0,0 @@
-from src.decoder.tensoRF_decoder import TensorVMSplit
-from src.utils.registry import Registry
-
-DECODER_REGISTRY = Registry("DECODER")
-
-DECODER_REGISTRY.register(TensorVMSplit)
@@ -1,757 +0,0 @@
1
- # Copyright 2023 TIME Authors and The HuggingFace Team. All rights reserved."
2
- # Licensed under the Apache License, Version 2.0 (the "License");
3
- # you may not use this file except in compliance with the License.
4
- # You may obtain a copy of the License at
5
- #
6
- # http://www.apache.org/licenses/LICENSE-2.0
7
- #
8
- # Unless required by applicable law or agreed to in writing, software
9
- # distributed under the License is distributed on an "AS IS" BASIS,
10
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
- # See the License for the specific language governing permissions and
12
- # limitations under the License.
13
-
14
- import copy
15
- import inspect
16
- import warnings
17
- from typing import Any, Callable, Dict, List, Optional, Union
18
-
19
- import torch
20
- from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
21
-
22
- from ...image_processor import VaeImageProcessor
23
- from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
24
- from ...models import AutoencoderKL, UNet2DConditionModel
25
- from ...schedulers import PNDMScheduler
26
- from ...schedulers.scheduling_utils import SchedulerMixin
27
- from ...utils import logging, randn_tensor
28
- from ..pipeline_utils import DiffusionPipeline
29
- from . import StableDiffusionPipelineOutput
30
- from .safety_checker import StableDiffusionSafetyChecker
31
-
32
-
33
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
-
35
- AUGS_CONST = ["A photo of ", "An image of ", "A picture of "]
36
-
37
-
38
- class StableDiffusionModelEditingPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
39
- r"""
40
- Pipeline for text-to-image model editing.
41
-
42
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
43
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
44
-
45
- Args:
46
- vae ([`AutoencoderKL`]):
47
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
48
- text_encoder ([`~transformers.CLIPTextModel`]):
49
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
50
- tokenizer ([`~transformers.CLIPTokenizer`]):
51
- A `CLIPTokenizer` to tokenize text.
52
- unet ([`UNet2DConditionModel`]):
53
- A `UNet2DConditionModel` to denoise the encoded image latents.
54
- scheduler ([`SchedulerMixin`]):
55
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
56
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
57
- safety_checker ([`StableDiffusionSafetyChecker`]):
58
- Classification module that estimates whether generated images could be considered offensive or harmful.
59
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
60
- about a model's potential harms.
61
- feature_extractor ([`~transformers.CLIPFeatureExtractor`]):
62
- A `CLIPFeatureExtractor` to extract features from generated images; used as inputs to the `safety_checker`.
63
- with_to_k ([`bool`]):
64
- Whether to edit the key projection matrices along with the value projection matrices.
65
- with_augs ([`list`]):
66
- Textual augmentations to apply while editing the text-to-image model. Set to `[]` for no augmentations.
67
- """
68
- _optional_components = ["safety_checker", "feature_extractor"]
69
-
70
- def __init__(
71
- self,
72
- vae: AutoencoderKL,
73
- text_encoder: CLIPTextModel,
74
- tokenizer: CLIPTokenizer,
75
- unet: UNet2DConditionModel,
76
- scheduler: SchedulerMixin,
77
- safety_checker: StableDiffusionSafetyChecker,
78
- feature_extractor: CLIPFeatureExtractor,
79
- requires_safety_checker: bool = True,
80
- with_to_k: bool = True,
81
- with_augs: list = AUGS_CONST,
82
- ):
83
- super().__init__()
84
-
85
- if isinstance(scheduler, PNDMScheduler):
86
- logger.error("PNDMScheduler for this pipeline is currently not supported.")
87
-
88
- if safety_checker is None and requires_safety_checker:
89
- logger.warning(
90
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
91
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
92
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
93
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
94
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
95
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
96
- )
97
-
98
- if safety_checker is not None and feature_extractor is None:
99
- raise ValueError(
100
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
101
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
102
- )
103
-
104
- self.register_modules(
105
- vae=vae,
106
- text_encoder=text_encoder,
107
- tokenizer=tokenizer,
108
- unet=unet,
109
- scheduler=scheduler,
110
- safety_checker=safety_checker,
111
- feature_extractor=feature_extractor,
112
- )
113
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
114
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
115
- self.register_to_config(requires_safety_checker=requires_safety_checker)
116
-
117
- self.with_to_k = with_to_k
118
- self.with_augs = with_augs
119
-
120
- # get cross-attention layers
121
- ca_layers = []
122
-
123
- def append_ca(net_):
124
- if net_.__class__.__name__ == "CrossAttention":
125
- ca_layers.append(net_)
126
- elif hasattr(net_, "children"):
127
- for net__ in net_.children():
128
- append_ca(net__)
129
-
130
- # recursively find all cross-attention layers in unet
131
- for net in self.unet.named_children():
132
- if "down" in net[0]:
133
- append_ca(net[1])
134
- elif "up" in net[0]:
135
- append_ca(net[1])
136
- elif "mid" in net[0]:
137
- append_ca(net[1])
138
-
139
- # get projection matrices
140
- self.ca_clip_layers = [l for l in ca_layers if l.to_v.in_features == 768]
141
- self.projection_matrices = [l.to_v for l in self.ca_clip_layers]
142
- self.og_matrices = [copy.deepcopy(l.to_v) for l in self.ca_clip_layers]
143
- if self.with_to_k:
144
- self.projection_matrices = self.projection_matrices + [l.to_k for l in self.ca_clip_layers]
145
- self.og_matrices = self.og_matrices + [copy.deepcopy(l.to_k) for l in self.ca_clip_layers]
146
-
147
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
148
- def enable_vae_slicing(self):
149
- r"""
150
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
151
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
152
- """
153
- self.vae.enable_slicing()
154
-
155
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
156
- def disable_vae_slicing(self):
157
- r"""
158
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
159
- computing decoding in one step.
160
- """
161
- self.vae.disable_slicing()
162
-
163
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
164
- def _encode_prompt(
165
- self,
166
- prompt,
167
- device,
168
- num_images_per_prompt,
169
- do_classifier_free_guidance,
170
- negative_prompt=None,
171
- prompt_embeds: Optional[torch.FloatTensor] = None,
172
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
173
- lora_scale: Optional[float] = None,
174
- ):
175
- r"""
176
- Encodes the prompt into text encoder hidden states.
177
-
178
- Args:
179
- prompt (`str` or `List[str]`, *optional*):
180
- prompt to be encoded
181
- device: (`torch.device`):
182
- torch device
183
- num_images_per_prompt (`int`):
184
- number of images that should be generated per prompt
185
- do_classifier_free_guidance (`bool`):
186
- whether to use classifier free guidance or not
187
- negative_prompt (`str` or `List[str]`, *optional*):
188
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
189
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
190
- less than `1`).
191
- prompt_embeds (`torch.FloatTensor`, *optional*):
192
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
193
- provided, text embeddings will be generated from `prompt` input argument.
194
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
195
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
196
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
197
- argument.
198
- lora_scale (`float`, *optional*):
199
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
200
- """
201
- # set lora scale so that monkey patched LoRA
202
- # function of text encoder can correctly access it
203
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
204
- self._lora_scale = lora_scale
205
-
206
- if prompt is not None and isinstance(prompt, str):
207
- batch_size = 1
208
- elif prompt is not None and isinstance(prompt, list):
209
- batch_size = len(prompt)
210
- else:
211
- batch_size = prompt_embeds.shape[0]
212
-
213
- if prompt_embeds is None:
214
- # textual inversion: procecss multi-vector tokens if necessary
215
- if isinstance(self, TextualInversionLoaderMixin):
216
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
217
-
218
- text_inputs = self.tokenizer(
219
- prompt,
220
- padding="max_length",
221
- max_length=self.tokenizer.model_max_length,
222
- truncation=True,
223
- return_tensors="pt",
224
- )
225
- text_input_ids = text_inputs.input_ids
226
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
227
-
228
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
229
- text_input_ids, untruncated_ids
230
- ):
231
- removed_text = self.tokenizer.batch_decode(
232
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
233
- )
234
- logger.warning(
235
- "The following part of your input was truncated because CLIP can only handle sequences up to"
236
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
237
- )
238
-
239
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
240
- attention_mask = text_inputs.attention_mask.to(device)
241
- else:
242
- attention_mask = None
243
-
244
- prompt_embeds = self.text_encoder(
245
- text_input_ids.to(device),
246
- attention_mask=attention_mask,
247
- )
248
- prompt_embeds = prompt_embeds[0]
249
-
250
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
251
-
252
- bs_embed, seq_len, _ = prompt_embeds.shape
253
- # duplicate text embeddings for each generation per prompt, using mps friendly method
254
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
255
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
256
-
257
- # get unconditional embeddings for classifier free guidance
258
- if do_classifier_free_guidance and negative_prompt_embeds is None:
259
- uncond_tokens: List[str]
260
- if negative_prompt is None:
261
- uncond_tokens = [""] * batch_size
262
- elif prompt is not None and type(prompt) is not type(negative_prompt):
263
- raise TypeError(
264
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
265
- f" {type(prompt)}."
266
- )
267
- elif isinstance(negative_prompt, str):
268
- uncond_tokens = [negative_prompt]
269
- elif batch_size != len(negative_prompt):
270
- raise ValueError(
271
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
272
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
273
- " the batch size of `prompt`."
274
- )
275
- else:
276
- uncond_tokens = negative_prompt
277
-
278
- # textual inversion: procecss multi-vector tokens if necessary
279
- if isinstance(self, TextualInversionLoaderMixin):
280
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
281
-
282
- max_length = prompt_embeds.shape[1]
283
- uncond_input = self.tokenizer(
284
- uncond_tokens,
285
- padding="max_length",
286
- max_length=max_length,
287
- truncation=True,
288
- return_tensors="pt",
289
- )
290
-
291
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
292
- attention_mask = uncond_input.attention_mask.to(device)
293
- else:
294
- attention_mask = None
295
-
296
- negative_prompt_embeds = self.text_encoder(
297
- uncond_input.input_ids.to(device),
298
- attention_mask=attention_mask,
299
- )
300
- negative_prompt_embeds = negative_prompt_embeds[0]
301
-
302
- if do_classifier_free_guidance:
303
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
304
- seq_len = negative_prompt_embeds.shape[1]
305
-
306
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
307
-
308
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
309
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
310
-
311
- # For classifier free guidance, we need to do two forward passes.
312
- # Here we concatenate the unconditional and text embeddings into a single batch
313
- # to avoid doing two forward passes
314
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
315
-
316
- return prompt_embeds
317
-
318
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
319
- def run_safety_checker(self, image, device, dtype):
320
- if self.safety_checker is None:
321
- has_nsfw_concept = None
322
- else:
323
- if torch.is_tensor(image):
324
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
325
- else:
326
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
327
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
328
- image, has_nsfw_concept = self.safety_checker(
329
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
330
- )
331
- return image, has_nsfw_concept
332
-
333
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
334
- def decode_latents(self, latents):
335
- warnings.warn(
336
- "The decode_latents method is deprecated and will be removed in a future version. Please"
337
- " use VaeImageProcessor instead",
338
- FutureWarning,
339
- )
340
- latents = 1 / self.vae.config.scaling_factor * latents
341
- image = self.vae.decode(latents, return_dict=False)[0]
342
- image = (image / 2 + 0.5).clamp(0, 1)
343
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
344
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
345
- return image
346
-
347
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
348
- def prepare_extra_step_kwargs(self, generator, eta):
349
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
350
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
351
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
352
- # and should be between [0, 1]
353
-
354
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
355
- extra_step_kwargs = {}
356
- if accepts_eta:
357
- extra_step_kwargs["eta"] = eta
358
-
359
- # check if the scheduler accepts generator
360
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
361
- if accepts_generator:
362
- extra_step_kwargs["generator"] = generator
363
- return extra_step_kwargs
364
-
365
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
366
- def check_inputs(
367
- self,
368
- prompt,
369
- height,
370
- width,
371
- callback_steps,
372
- negative_prompt=None,
373
- prompt_embeds=None,
374
- negative_prompt_embeds=None,
375
- ):
376
- if height % 8 != 0 or width % 8 != 0:
377
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
378
-
379
- if (callback_steps is None) or (
380
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
381
- ):
382
- raise ValueError(
383
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
384
- f" {type(callback_steps)}."
385
- )
386
-
387
- if prompt is not None and prompt_embeds is not None:
388
- raise ValueError(
389
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
390
- " only forward one of the two."
391
- )
392
- elif prompt is None and prompt_embeds is None:
393
- raise ValueError(
394
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
395
- )
396
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
397
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
398
-
399
- if negative_prompt is not None and negative_prompt_embeds is not None:
400
- raise ValueError(
401
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
402
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
403
- )
404
-
405
- if prompt_embeds is not None and negative_prompt_embeds is not None:
406
- if prompt_embeds.shape != negative_prompt_embeds.shape:
407
- raise ValueError(
408
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
409
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
410
- f" {negative_prompt_embeds.shape}."
411
- )
412
-
413
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
414
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
415
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
416
- if isinstance(generator, list) and len(generator) != batch_size:
417
- raise ValueError(
418
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
419
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
420
- )
421
-
422
- if latents is None:
423
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
424
- else:
425
- latents = latents.to(device)
426
-
427
- # scale the initial noise by the standard deviation required by the scheduler
428
- latents = latents * self.scheduler.init_noise_sigma
429
- return latents
430
-
431
- @torch.no_grad()
432
- def edit_model(
433
- self,
434
- source_prompt: str,
435
- destination_prompt: str,
436
- lamb: float = 0.1,
437
- restart_params: bool = True,
438
- ):
439
- r"""
440
- Apply model editing via closed-form solution (see Eq. 5 in the TIME [paper](https://arxiv.org/abs/2303.08084)).
441
-
442
- Args:
443
- source_prompt (`str`):
444
- The source prompt containing the concept to be edited.
445
- destination_prompt (`str`):
446
- The destination prompt. Must contain all words from `source_prompt` with additional ones to specify the
447
- target edit.
448
- lamb (`float`, *optional*, defaults to 0.1):
449
- The lambda parameter specifying the regularization intesity. Smaller values increase the editing power.
450
- restart_params (`bool`, *optional*, defaults to True):
451
- Restart the model parameters to their pre-trained version before editing. This is done to avoid edit
452
- compounding. When it is `False`, edits accumulate.
453
- """
454
-
455
- # restart LDM parameters
456
- if restart_params:
457
- num_ca_clip_layers = len(self.ca_clip_layers)
458
- for idx_, l in enumerate(self.ca_clip_layers):
459
- l.to_v = copy.deepcopy(self.og_matrices[idx_])
460
- self.projection_matrices[idx_] = l.to_v
461
- if self.with_to_k:
462
- l.to_k = copy.deepcopy(self.og_matrices[num_ca_clip_layers + idx_])
463
- self.projection_matrices[num_ca_clip_layers + idx_] = l.to_k
464
-
465
- # set up sentences
466
- old_texts = [source_prompt]
467
- new_texts = [destination_prompt]
468
- # add augmentations
469
- base = old_texts[0] if old_texts[0][0:1] != "A" else "a" + old_texts[0][1:]
470
- for aug in self.with_augs:
471
- old_texts.append(aug + base)
472
- base = new_texts[0] if new_texts[0][0:1] != "A" else "a" + new_texts[0][1:]
473
- for aug in self.with_augs:
474
- new_texts.append(aug + base)
475
-
476
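        # e.g. with an augmentation prefix such as "A photo of ", the source
        # "A pack of roses" is first lowercased to "a pack of roses" and then
        # expanded to "A photo of a pack of roses"; the destination prompt is
        # expanded the same way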
        # prepare input k* and v*
        old_embs, new_embs = [], []
        for old_text, new_text in zip(old_texts, new_texts):
            text_input = self.tokenizer(
                [old_text, new_text],
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
            old_emb, new_emb = text_embeddings
            old_embs.append(old_emb)
            new_embs.append(new_emb)

        # identify corresponding destinations for each token in old_emb
        idxs_replaces = []
        for old_text, new_text in zip(old_texts, new_texts):
            tokens_a = self.tokenizer(old_text).input_ids
            tokens_b = self.tokenizer(new_text).input_ids
            tokens_a = [self.tokenizer.encode("a ")[1] if self.tokenizer.decode(t) == "an" else t for t in tokens_a]
            tokens_b = [self.tokenizer.encode("a ")[1] if self.tokenizer.decode(t) == "an" else t for t in tokens_b]
            num_orig_tokens = len(tokens_a)
            idxs_replace = []
            j = 0
            for i in range(num_orig_tokens):
                curr_token = tokens_a[i]
                while tokens_b[j] != curr_token:
                    j += 1
                idxs_replace.append(j)
                j += 1
            while j < 77:
                idxs_replace.append(j)
                j += 1
            while len(idxs_replace) < 77:
                idxs_replace.append(76)
            idxs_replaces.append(idxs_replace)
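        # e.g. aligning "a pack of roses" with "a pack of blue roses": each source
        # token advances j until the same token reappears in the destination, so
        # "roses" maps past the inserted "blue"; the remaining indices are then
        # padded out to the 77-token CLIP sequence length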
        # prepare batch: for each pair of sentences, old context and new values
        contexts, valuess = [], []
        for old_emb, new_emb, idxs_replace in zip(old_embs, new_embs, idxs_replaces):
            context = old_emb.detach()
            values = []
            with torch.no_grad():
                for layer in self.projection_matrices:
                    values.append(layer(new_emb[idxs_replace]).detach())
            contexts.append(context)
            valuess.append(values)

        # edit the model
        for layer_num in range(len(self.projection_matrices)):
            # mat1 = \lambda W + \sum{v k^T}
            mat1 = lamb * self.projection_matrices[layer_num].weight

            # mat2 = \lambda I + \sum{k k^T}
            mat2 = lamb * torch.eye(
                self.projection_matrices[layer_num].weight.shape[1],
                device=self.projection_matrices[layer_num].weight.device,
            )

            # aggregate sums for mat1, mat2
            for context, values in zip(contexts, valuess):
                context_vector = context.reshape(context.shape[0], context.shape[1], 1)
                context_vector_T = context.reshape(context.shape[0], 1, context.shape[1])
                value_vector = values[layer_num].reshape(values[layer_num].shape[0], values[layer_num].shape[1], 1)
                for_mat1 = (value_vector @ context_vector_T).sum(dim=0)
                for_mat2 = (context_vector @ context_vector_T).sum(dim=0)
                mat1 += for_mat1
                mat2 += for_mat2

            # update projection matrix
            self.projection_matrices[layer_num].weight = torch.nn.Parameter(mat1 @ torch.inverse(mat2))
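            # in closed form, each projection matrix is thus updated as
            #   W_new = (\lambda W_old + \sum{v k^T}) @ (\lambda I + \sum{k k^T})^{-1}
            # which is Eq. 5 of the TIME paper referenced in the docstring above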

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function called every `callback_steps` steps during inference. The function is called with the
                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).

        Examples:

        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionModelEditingPipeline

        >>> model_ckpt = "CompVis/stable-diffusion-v1-4"
        >>> pipe = StableDiffusionModelEditingPipeline.from_pretrained(model_ckpt)

        >>> pipe = pipe.to("cuda")

        >>> source_prompt = "A pack of roses"
        >>> destination_prompt = "A pack of blue roses"
        >>> pipe.edit_model(source_prompt, destination_prompt)

        >>> prompt = "A field of roses"
        >>> image = pipe(prompt).images[0]
        ```

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_encoder_lora_scale = (
            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
        )
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        if output_type != "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
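A minimal sketch of how successive edits compose, reusing the checkpoint from the docstring example above (the prompts here are illustrative):

```py
import torch
from diffusers import StableDiffusionModelEditingPipeline

pipe = StableDiffusionModelEditingPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")

pipe.edit_model("A pack of roses", "A pack of blue roses")
# restart_params=False keeps the rose edit in place, so both edits are active afterwards
pipe.edit_model("A photo of the sky", "A photo of a cloudy sky", restart_params=False)

image = pipe("A field of roses under the sky").images[0]
```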
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_note_seq_objects.py DELETED
@@ -1,17 +0,0 @@
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
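A rough sketch of the fallback behavior this dummy provides when the backend is missing (the exact error message depends on the installed diffusers version):

```py
from diffusers.utils.dummy_note_seq_objects import MidiProcessor

try:
    MidiProcessor()  # requires_backends raises because `note_seq` is not installed
except ImportError as err:
    print(err)  # the message points the user at installing `note_seq`
```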
 
spaces/Andy1621/uniformer_image_detection/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py DELETED
@@ -1,3 +0,0 @@
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
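A plausible variant of the same config, assuming an MMCV/MMDetection version that supports dynamic loss scaling:

```py
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
# let the runner adapt the loss scale during training instead of fixing it at 512
fp16 = dict(loss_scale='dynamic')
```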
 
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context.py DELETED
@@ -1,2 +0,0 @@
_base_ = './deeplabv3plus_r50-d8_480x480_80k_pascal_context.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
spaces/AndySAnker/DeepStruc/tools/data_loader.py DELETED
@@ -1,236 +0,0 @@
import os, torch, h5py, random, sys, shutil, yaml
from pytorch_lightning.callbacks import ModelCheckpoint
import numpy as np
from torch_geometric.data import Data, DataLoader
from tqdm import tqdm
import pytorch_lightning as pl


class graph_loader(pl.LightningDataModule):
    def __init__(self, data_dir, cluster_size=None, num_files=None, batchsize=1, shuffle=True, num_workers=0):
        super(graph_loader, self).__init__()
        """
        Parameters
        ----------
        data_dir: directory containing the HDF5 graph files.
        cluster_size: maximum number of nodes per cluster; inferred from the data when None.
        num_files: optional cap on how many files to load.
        batchsize: batch size passed to the dataloaders.
        shuffle: whether to shuffle the file list before splitting.
        num_workers: number of dataloader workers.

        Returns
        -------
        None
        """
        self.batchsize = int(batchsize)
        self.num_workers = num_workers
        self.files_sorted = sorted(os.listdir(data_dir))
        self.cluster_size = cluster_size
        files = self.files_sorted.copy()
        # files = [file for file in files if 'FCC' in file]

        if shuffle:
            random.shuffle(files)
        if num_files is not None:
            files = files[:num_files]

        nTrain = int(0.6 * len(files))
        nValid = int((len(files) - nTrain) / 2)
        nTest = len(files) - (nTrain + nValid)

        print('\nBatch size: {}'.format(batchsize))
        print('Total number of graphs {}.'.format(len(files)))
        print('\tTraining files:', nTrain)
        print('\tValidation files:', nValid)
        print('\tTest files:', nTest, '\n')

        self.trSamples, self.vlSamples, self.teSamples = list(), list(), list()
        print('Loading graphs:')

        for idx in range(len(files)):
            h5f = h5py.File(data_dir + '/' + files[idx], 'r')
            b = h5f['Node Feature Matrix'][:]
            h5f.close()

            if self.cluster_size is None:
                self.cluster_size = len(b)
            elif len(b) > self.cluster_size:
                self.cluster_size = len(b)

        largest_x_dist, largest_y_dist, largest_z_dist, edge_f_max = 0, 0, 0, 0
        for idx in range(nTrain):
            h5f = h5py.File(data_dir + '/' + files[idx], 'r')
            a = h5f['Edge Feature Matrix'][:]
            b = h5f['Node Feature Matrix'][:]

            h5f.close()

            diff_ph = abs(np.amin(b, axis=0)) + np.amax(b, axis=0)
            if largest_x_dist < diff_ph[0]:
                largest_x_dist = diff_ph[0]
            if largest_y_dist < diff_ph[1]:
                largest_y_dist = diff_ph[1]
            if largest_z_dist < diff_ph[2]:
                largest_z_dist = diff_ph[2]
            if np.amax(a) > edge_f_max:
                edge_f_max = np.amax(a)

        self.largest_x_dist = largest_x_dist
        self.largest_y_dist = largest_y_dist
        self.largest_z_dist = largest_z_dist

        for idx in tqdm(range(len(files))):
            h5f = h5py.File(data_dir + '/' + files[idx], 'r')
            a = h5f['Edge Feature Matrix'][:]  # todo: norm this
            b = h5f['Node Feature Matrix'][:]
            c = h5f['Edge Directions'][:]
            d = h5f['PDF label'][:]
            h5f.close()

            a /= edge_f_max
            min_vals = np.amin(b, axis=0)
            if min_vals[0] < 0.0:  # Make all coordinates positive
                b[:, 0] -= min_vals[0]
            if min_vals[1] < 0.0:  # Make all coordinates positive
                b[:, 1] -= min_vals[1]
            if min_vals[2] < 0.0:  # Make all coordinates positive
                b[:, 2] -= min_vals[2]

            b[:, 0] /= largest_x_dist
            b[:, 1] /= largest_y_dist
            b[:, 2] /= largest_z_dist

            cord_ph = np.zeros((self.cluster_size, np.shape(b)[1])) - 1
            cord_ph[:np.shape(b)[0]] = b

            d /= np.amax(d)  # normalize the PDF to a maximum of 1

            pdf = torch.tensor([d], dtype=torch.float)
            x = torch.tensor(b, dtype=torch.float)
            y = torch.tensor([cord_ph], dtype=torch.float)
            edge_index = torch.tensor(c, dtype=torch.long)
            edge_attr = torch.tensor(a, dtype=torch.float)
            name_idx = torch.tensor(self.files_sorted.index(files[idx]), dtype=torch.int16)

            if idx < nTrain:
                self.trSamples.append(
                    tuple((Data(x=x, y=y, edge_index=edge_index, edge_attr=edge_attr), pdf.T, name_idx)))
            elif idx < nTrain + nValid:
                self.vlSamples.append(
                    tuple((Data(x=x, y=y, edge_index=edge_index, edge_attr=edge_attr), pdf.T, name_idx)))
            else:
                self.teSamples.append(
                    tuple((Data(x=x, y=y, edge_index=edge_index, edge_attr=edge_attr), pdf.T, name_idx)))

    def train_dataloader(self):
        return DataLoader(self.trSamples, batch_size=self.batchsize, shuffle=True, num_workers=self.num_workers)

    def val_dataloader(self):
        return DataLoader(self.vlSamples, batch_size=self.batchsize, num_workers=self.num_workers)

    def test_dataloader(self):
        return DataLoader(self.teSamples, batch_size=self.batchsize, num_workers=self.num_workers)


def save_xyz_file(save_dir, cords, file_name, xyz_scale=[1, 1, 1]):
    cords = [xyz for xyz in cords if np.mean(xyz) >= -0.2]
    cords = np.array(cords)
    cords[:, 0] -= cords[:, 0].mean()
    cords[:, 1] -= cords[:, 1].mean()
    cords[:, 2] -= cords[:, 2].mean()
    these_cords = []
    for count, xyz in enumerate(cords):
        if count == 0:
            these_cords.append(['{:d}'.format(len(cords))])
            these_cords.append([''])

        these_cords.append(['W {:.4f} {:.4f} {:.4f}'.format(xyz[0] * xyz_scale[0], xyz[1] * xyz_scale[1], xyz[2] * xyz_scale[2])])

    np.savetxt(save_dir + '/{}.xyz'.format(file_name), these_cords, fmt='%s')

    return these_cords


def folder_manager(input_dict, model_arch):
    this_trainer = None
    epoch = input_dict['epochs']
    if not os.path.isdir(input_dict['save_dir']):
        os.mkdir(input_dict['save_dir'])
        os.mkdir(input_dict['save_dir'] + '/models')
        shutil.copy2('train.py', input_dict['save_dir'] + '/train.py')
        shutil.copy2('./tools/data_loader.py', input_dict['save_dir'] + '/data_loader.py')
        shutil.copy2('./tools/module.py', input_dict['save_dir'] + '/module.py')
        os.mkdir(input_dict['save_dir'] + '/prior')
        os.mkdir(input_dict['save_dir'] + '/posterior')
    else:
        shutil.copy2('train.py', input_dict['save_dir'] + '/train.py')
        shutil.copy2('./tools/data_loader.py', input_dict['save_dir'] + '/data_loader.py')
        shutil.copy2('./tools/module.py', input_dict['save_dir'] + '/module.py')

    if input_dict['load_trainer']:
        best_model = sorted(os.listdir(input_dict['save_dir'] + '/models'))
        print(f'\nUsing {best_model[0]} as starting model!\n')
        this_trainer = input_dict['save_dir'] + '/models/' + best_model[0]
        # input_dict = yaml.load(f'{input_dict["save_dir"]}/input_dict.yaml', Loader=yaml.FullLoader)

        try:
            with open(f'{input_dict["save_dir"]}/input_dict.yaml') as file:
                input_dict = yaml.full_load(file)
            input_dict['load_trainer'] = True
            input_dict['epochs'] = epoch
            with open(f'{input_dict["save_dir"]}/model_arch.yaml') as file:
                model_arch = yaml.full_load(file)
        except FileNotFoundError:  # todo: transition - needs to be deleted at some point
            with open(f'{input_dict["save_dir"]}/input_dict.yaml', 'w') as outfile:
                yaml.dump(input_dict, outfile, allow_unicode=True, default_flow_style=False)

            with open(f'{input_dict["save_dir"]}/model_arch.yaml', 'w') as outfile:
                yaml.dump(model_arch, outfile, allow_unicode=True, default_flow_style=False)
    else:
        with open(f'{input_dict["save_dir"]}/input_dict.yaml', 'w') as outfile:
            yaml.dump(input_dict, outfile, allow_unicode=True, default_flow_style=False)

        with open(f'{input_dict["save_dir"]}/model_arch.yaml', 'w') as outfile:
            yaml.dump(model_arch, outfile, allow_unicode=True, default_flow_style=False)
    return this_trainer, input_dict, model_arch


def get_callbacks(save_dir):
    checkpoint_callback_tot = ModelCheckpoint(
        monitor='vld_tot',
        dirpath=save_dir + '/models',
        filename='model-{vld_tot:.5f}-{beta:.3f}-{vld_rec_pdf:.5f}-{epoch:010d}',
        save_top_k=5,
        mode='min',
        save_last=True,
    )

    checkpoint_callback_rec = ModelCheckpoint(
        monitor='vld_rec',
        dirpath=save_dir + '/models',
        filename='model-{vld_rec:.5f}-{beta:.3f}-{vld_rec_pdf:.5f}-{vld_tot:.5f}-{epoch:010d}',
        save_top_k=5,
        mode='min',
    )

    checkpoint_callback_kld = ModelCheckpoint(
        monitor='vld_kld',
        dirpath=save_dir + '/models',
        filename='model-{vld_kld:.5f}-{beta:.3f}-{vld_rec_pdf:.5f}-{vld_tot:.5f}-{epoch:010d}',
        save_top_k=5,
        mode='min',
    )

    checkpoint_callback_vld_rec_pdf = ModelCheckpoint(
        monitor='vld_rec_pdf',
        dirpath=save_dir + '/models',
        filename='model-{vld_rec_pdf:.5f}-{beta:.3f}-{vld_tot:.5f}-{epoch:010d}',
        save_top_k=5,
        mode='min',
    )

    return [checkpoint_callback_tot, checkpoint_callback_rec, checkpoint_callback_kld,
            checkpoint_callback_vld_rec_pdf]
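Two quick illustrations of how this module is meant to be used, sketched under assumed inputs (the directory name, file count, and coordinate values below are hypothetical):

```py
# Load graphs from a directory of HDF5 files and iterate training batches.
dm = graph_loader(data_dir='graphs', num_files=100, batchsize=4, num_workers=2)
for data, pdf, name_idx in dm.train_dataloader():
    pass  # `data` is a batched torch_geometric graph, `pdf` the normalized PDF target

# save_xyz_file writes standard XYZ format: atom count, a blank comment line, then
# one "W x y z" row per mean-centered atom, e.g. for a two-atom cluster:
#   2
#
#   W 1.2340 -1.2340 0.0000
#   W -1.2340 1.2340 0.0000
```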
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/script.py DELETED
@@ -1,112 +0,0 @@
import base64
import re
import time
from functools import partial
from io import BytesIO

import gradio as gr
import torch

from extensions.multimodal.multimodal_embedder import MultimodalEmbedder
from modules import shared
from modules.logging_colors import logger

params = {
    "add_all_images_to_prompt": False,
    # device to run vision encoder on
    "vision_device": None,
    # bits to load vision encoder in, either 16 or 32
    "vision_bits": 32,
    # device to run multimodal projector on
    "projector_device": None,
    # bits to load multimodal projector in, either 16 or 32
    "projector_bits": 32
}


# If 'state' is True, will hijack the next chat generation
input_hijack = {
    'state': False,
    'value': ["", ""]
}


# initialized in ui, so that params are loaded from settings
multimodal_embedder: MultimodalEmbedder = None


def chat_input_modifier(text, visible_text, state):
    global input_hijack
    if input_hijack['state']:
        input_hijack['state'] = False
        return input_hijack['value'](text, visible_text)
    else:
        return text, visible_text


def add_chat_picture(picture, text, visible_text):
    # resize the image so that the shortest edge is at least 224 (the CLIP input size)
    # and at most 300 (to keep the history manageable)
    max_hw, min_hw = max(picture.size), min(picture.size)
    aspect_ratio = max_hw / min_hw
    shortest_edge = int(max(300 / aspect_ratio, 224))
    longest_edge = int(shortest_edge * aspect_ratio)
    w = shortest_edge if picture.width < picture.height else longest_edge
    h = shortest_edge if picture.width >= picture.height else longest_edge
    picture = picture.resize((w, h))
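    # e.g. for a 1000x500 input: aspect_ratio = 2.0, shortest_edge = max(300 / 2, 224) = 224,
    # longest_edge = 448, and since width >= height the picture is resized to 448x224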

    buffer = BytesIO()
    picture.save(buffer, format="JPEG")
    img_str = base64.b64encode(buffer.getvalue()).decode('utf-8')
    image = f'<img src="data:image/jpeg;base64,{img_str}">'

    if '<image>' in text:
        text = text.replace('<image>', image)
    else:
        text = text + '\n' + image

    if visible_text == '' or visible_text is None:
        visible_text = text
    elif '<image>' in visible_text:
        visible_text = visible_text.replace('<image>', image)
    else:
        visible_text = visible_text + '\n' + image

    return text, visible_text


def custom_tokenized_length(prompt):
    return multimodal_embedder.len_in_tokens(prompt)


def tokenizer_modifier(state, prompt, input_ids, input_embeds):
    global params
    start_ts = time.time()
    image_match = re.search(r'<img src="data:image/jpeg;base64,[A-Za-z0-9+/=]+">', prompt)

    if image_match is None:
        return prompt, input_ids, input_embeds

    prompt, input_ids, input_embeds, total_embedded = multimodal_embedder.forward(prompt, state, params)
    logger.info(f'Embedded {total_embedded} image(s) in {time.time()-start_ts:.2f}s')
    return (prompt,
            input_ids.unsqueeze(0).to(shared.model.device, dtype=torch.int64),
            input_embeds.unsqueeze(0).to(shared.model.device, dtype=shared.model.dtype))


def ui():
    global multimodal_embedder
    multimodal_embedder = MultimodalEmbedder(params)
    with gr.Column():
        picture_select = gr.Image(label='Send a picture', type='pil')
        # The models don't seem to deal well with multiple images
        single_image_checkbox = gr.Checkbox(False, label='Embed all images, not only the last one')
    # Prepare the input hijack
    picture_select.upload(
        lambda picture: input_hijack.update({"state": True, "value": partial(add_chat_picture, picture)}),
        [picture_select],
        None
    )
    picture_select.clear(lambda: input_hijack.update({"state": False, "value": ["", ""]}), None, None)
    single_image_checkbox.change(lambda x: params.update({"add_all_images_to_prompt": x}), single_image_checkbox, None)
    shared.gradio['Generate'].click(lambda: None, None, picture_select)
    shared.gradio['textbox'].submit(lambda: None, None, picture_select)
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/__init__.py DELETED
@@ -1,27 +0,0 @@
# Uniformer
# From https://github.com/Sense-X/UniFormer
# Apache-2.0 license

import os

from annotator.uniformer.mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
from annotator.uniformer.mmseg.core.evaluation import get_palette
from annotator.util import annotator_ckpts_path


checkpoint_file = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/upernet_global_small.pth"


class UniformerDetector:
    def __init__(self):
        modelpath = os.path.join(annotator_ckpts_path, "upernet_global_small.pth")
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(checkpoint_file, model_dir=annotator_ckpts_path)
        config_file = os.path.join(os.path.dirname(annotator_ckpts_path), "uniformer", "exp", "upernet_global_small", "config.py")
        self.model = init_segmentor(config_file, modelpath).cuda()

    def __call__(self, img):
        result = inference_segmentor(self.model, img)
        res_img = show_result_pyplot(self.model, img, result, get_palette('ade'), opacity=1)
        return res_img
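A minimal usage sketch, assuming a CUDA device and an image on disk (the file name is hypothetical):

```py
import cv2

detector = UniformerDetector()  # downloads upernet_global_small.pth on first use
img = cv2.imread('input.png')   # HWC uint8 image
seg = detector(img)             # segmentation rendered with the ADE20K palette at opacity 1
```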
 
spaces/ArdaSaygan/PollGeneratorApp/app.py DELETED
@@ -1,60 +0,0 @@
import gradio as gr
import os
import openai
from create_poll import create_poll
from utils import GPTCompletion
from dotenv import load_dotenv

gr.close_all()

load_dotenv()

openai.api_key = os.environ["OPENAI_API_KEY"]


def chatWithGPT(chatHistory):
    completion = GPTCompletion(system="You are an AI chatting with a human.", max_tokens=2048, temperature=1.5)
    gptResponse = completion.chatComplete(chatHistory)
    chatHistory[-1][1] = gptResponse
    return chatHistory

with gr.Blocks() as demo:
    chatHistory = gr.State(value=[])

    def generateResponse(message, chatHistory):
        completion = GPTCompletion(system="You are an AI chatting with a human.", max_tokens=2048, temperature=1.5)
        gptResponse = completion.chatComplete(chatHistory, message)
        chatHistory.append((message, gptResponse))
        return chatHistory

    def pollinize(chatHistory):
        chatList = []
        for log in chatHistory:
            chatList.append("User: " + log[0])
            chatList.append("AI: " + log[1])
        chatString = "\n".join(chatList)

        return create_poll(chatString, openai.api_key)

    def uploadApi(apikey):
        openai.api_key = apikey

    gr.Markdown("This little app demonstrates how LLMs can be used to create polls from a chat. To give it a try, discuss a topic with ChatGPT first and then press the Create a poll button. The poll question will be generated from the context of the chat.")

    chatbot = gr.Chatbot().style(height=460)
    input = gr.Textbox(label="Message")
    nextBtn = gr.Button("Send message")
    nextBtn.click(generateResponse, [input, chatHistory], chatbot, scroll_to_output=True, show_progress=True)

    debatePoll = gr.Textbox(label="Poll")
    pollinizeButton = gr.Button("Create a poll")
    pollinizeButton.click(pollinize, chatHistory, debatePoll, scroll_to_output=True, show_progress=True)

    apikey = gr.Textbox(label="API Key")
    apiUpload = gr.Button("Upload custom api key")
    apiUpload.click(uploadApi, apikey, None, scroll_to_output=True, show_progress=True)

demo.launch()
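For reference, `pollinize` flattens the chat history into a plain transcript before handing it to `create_poll`; with one exchange the string would look roughly like this (the topic is made up):

```py
chatHistory = [("Should cities invest more in cycling infrastructure?",
                "There are several trade-offs to weigh...")]
# pollinize builds:
#   User: Should cities invest more in cycling infrastructure?
#   AI: There are several trade-offs to weigh...
# and calls create_poll(chatString, openai.api_key) on it.
```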
 
spaces/Arnx/MusicGenXvAKN/tests/data/test_audio_dataset.py DELETED
@@ -1,352 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from functools import partial
8
- from itertools import product
9
- import json
10
- import math
11
- import os
12
- import random
13
- import typing as tp
14
-
15
- import pytest
16
- import torch
17
- from torch.utils.data import DataLoader
18
-
19
- from audiocraft.data.audio_dataset import (
20
- AudioDataset,
21
- AudioMeta,
22
- _get_audio_meta,
23
- load_audio_meta,
24
- save_audio_meta
25
- )
26
- from audiocraft.data.zip import PathInZip
27
-
28
- from ..common_utils import TempDirMixin, get_white_noise, save_wav
29
-
30
-
31
- class TestAudioMeta(TempDirMixin):
32
-
33
- def test_get_audio_meta(self):
34
- sample_rates = [8000, 16_000]
35
- channels = [1, 2]
36
- duration = 1.
37
- for sample_rate, ch in product(sample_rates, channels):
38
- n_frames = int(duration * sample_rate)
39
- wav = get_white_noise(ch, n_frames)
40
- path = self.get_temp_path('sample.wav')
41
- save_wav(path, wav, sample_rate)
42
- m = _get_audio_meta(path, minimal=True)
43
- assert m.path == path, 'path does not match'
44
- assert m.sample_rate == sample_rate, 'sample rate does not match'
45
- assert m.duration == duration, 'duration does not match'
46
- assert m.amplitude is None
47
- assert m.info_path is None
48
-
49
- def test_save_audio_meta(self):
50
- audio_meta = [
51
- AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')),
52
- AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json'))
53
- ]
54
- empty_audio_meta = []
55
- for idx, meta in enumerate([audio_meta, empty_audio_meta]):
56
- path = self.get_temp_path(f'data_{idx}_save.jsonl')
57
- save_audio_meta(path, meta)
58
- with open(path, 'r') as f:
59
- lines = f.readlines()
60
- read_meta = [AudioMeta.from_dict(json.loads(line)) for line in lines]
61
- assert len(read_meta) == len(meta)
62
- for m, read_m in zip(meta, read_meta):
63
- assert m == read_m
64
-
65
- def test_load_audio_meta(self):
66
- try:
67
- import dora
68
- except ImportError:
69
- dora = None # type: ignore
70
-
71
- audio_meta = [
72
- AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')),
73
- AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json'))
74
- ]
75
- empty_meta = []
76
- for idx, meta in enumerate([audio_meta, empty_meta]):
77
- path = self.get_temp_path(f'data_{idx}_load.jsonl')
78
- with open(path, 'w') as f:
79
- for m in meta:
80
- json_str = json.dumps(m.to_dict()) + '\n'
81
- f.write(json_str)
82
- read_meta = load_audio_meta(path)
83
- assert len(read_meta) == len(meta)
84
- for m, read_m in zip(meta, read_meta):
85
- if dora:
86
- m.path = dora.git_save.to_absolute_path(m.path)
87
- assert m == read_m, f'original={m}, read={read_m}'
88
-
89
-
90
- class TestAudioDataset(TempDirMixin):
91
-
92
- def _create_audio_files(self,
93
- root_name: str,
94
- num_examples: int,
95
- durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.),
96
- sample_rate: int = 16_000,
97
- channels: int = 1):
98
- root_dir = self.get_temp_dir(root_name)
99
- for i in range(num_examples):
100
- if isinstance(durations, float):
101
- duration = durations
102
- elif isinstance(durations, tuple) and len(durations) == 1:
103
- duration = durations[0]
104
- elif isinstance(durations, tuple) and len(durations) == 2:
105
- duration = random.uniform(durations[0], durations[1])
106
- else:
107
- assert False
108
- n_frames = int(duration * sample_rate)
109
- wav = get_white_noise(channels, n_frames)
110
-            path = os.path.join(root_dir, f'example_{i}.wav')
-            save_wav(path, wav, sample_rate)
-        return root_dir
-
-    def _create_audio_dataset(self,
-                              root_name: str,
-                              total_num_examples: int,
-                              durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.),
-                              sample_rate: int = 16_000,
-                              channels: int = 1,
-                              segment_duration: tp.Optional[float] = None,
-                              num_examples: int = 10,
-                              shuffle: bool = True,
-                              return_info: bool = False):
-        root_dir = self._create_audio_files(root_name, total_num_examples, durations, sample_rate, channels)
-        dataset = AudioDataset.from_path(root_dir,
-                                         minimal_meta=True,
-                                         segment_duration=segment_duration,
-                                         num_samples=num_examples,
-                                         sample_rate=sample_rate,
-                                         channels=channels,
-                                         shuffle=shuffle,
-                                         return_info=return_info)
-        return dataset
-
-    def test_dataset_full(self):
-        total_examples = 10
-        min_duration, max_duration = 1., 4.
-        sample_rate = 16_000
-        channels = 1
-        dataset = self._create_audio_dataset(
-            'dset', total_examples, durations=(min_duration, max_duration),
-            sample_rate=sample_rate, channels=channels, segment_duration=None)
-        assert len(dataset) == total_examples
-        assert dataset.sample_rate == sample_rate
-        assert dataset.channels == channels
-        for idx in range(len(dataset)):
-            sample = dataset[idx]
-            assert sample.shape[0] == channels
-            assert sample.shape[1] <= int(max_duration * sample_rate)
-            assert sample.shape[1] >= int(min_duration * sample_rate)
-
-    def test_dataset_segment(self):
-        total_examples = 10
-        num_samples = 20
-        min_duration, max_duration = 1., 4.
-        segment_duration = 1.
-        sample_rate = 16_000
-        channels = 1
-        dataset = self._create_audio_dataset(
-            'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
-            channels=channels, segment_duration=segment_duration, num_examples=num_samples)
-        assert len(dataset) == num_samples
-        assert dataset.sample_rate == sample_rate
-        assert dataset.channels == channels
-        for idx in range(len(dataset)):
-            sample = dataset[idx]
-            assert sample.shape[0] == channels
-            assert sample.shape[1] == int(segment_duration * sample_rate)
-
-    def test_dataset_equal_audio_and_segment_durations(self):
-        total_examples = 1
-        num_samples = 2
-        audio_duration = 1.
-        segment_duration = 1.
-        sample_rate = 16_000
-        channels = 1
-        dataset = self._create_audio_dataset(
-            'dset', total_examples, durations=audio_duration, sample_rate=sample_rate,
-            channels=channels, segment_duration=segment_duration, num_examples=num_samples)
-        assert len(dataset) == num_samples
-        assert dataset.sample_rate == sample_rate
-        assert dataset.channels == channels
-        for idx in range(len(dataset)):
-            sample = dataset[idx]
-            assert sample.shape[0] == channels
-            assert sample.shape[1] == int(segment_duration * sample_rate)
-        # the random seek_time adds variability on audio read
-        sample_1 = dataset[0]
-        sample_2 = dataset[1]
-        assert not torch.allclose(sample_1, sample_2)
-
-    def test_dataset_samples(self):
-        total_examples = 1
-        num_samples = 2
-        audio_duration = 1.
-        segment_duration = 1.
-        sample_rate = 16_000
-        channels = 1
-
-        create_dataset = partial(
-            self._create_audio_dataset,
-            'dset', total_examples, durations=audio_duration, sample_rate=sample_rate,
-            channels=channels, segment_duration=segment_duration, num_examples=num_samples,
-        )
-
-        dataset = create_dataset(shuffle=True)
-        # when shuffle = True, we have different inputs for the same index across epoch
-        sample_1 = dataset[0]
-        sample_2 = dataset[0]
-        assert not torch.allclose(sample_1, sample_2)
-
-        dataset_noshuffle = create_dataset(shuffle=False)
-        # when shuffle = False, we have same inputs for the same index across epoch
-        sample_1 = dataset_noshuffle[0]
-        sample_2 = dataset_noshuffle[0]
-        assert torch.allclose(sample_1, sample_2)
-
-    def test_dataset_return_info(self):
-        total_examples = 10
-        num_samples = 20
-        min_duration, max_duration = 1., 4.
-        segment_duration = 1.
-        sample_rate = 16_000
-        channels = 1
-        dataset = self._create_audio_dataset(
-            'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
-            channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True)
-        assert len(dataset) == num_samples
-        assert dataset.sample_rate == sample_rate
-        assert dataset.channels == channels
-        for idx in range(len(dataset)):
-            sample, segment_info = dataset[idx]
-            assert sample.shape[0] == channels
-            assert sample.shape[1] == int(segment_duration * sample_rate)
-            assert segment_info.sample_rate == sample_rate
-            assert segment_info.total_frames == int(segment_duration * sample_rate)
-            assert segment_info.n_frames <= int(segment_duration * sample_rate)
-            assert segment_info.seek_time >= 0
-
-    def test_dataset_return_info_no_segment_duration(self):
-        total_examples = 10
-        num_samples = 20
-        min_duration, max_duration = 1., 4.
-        segment_duration = None
-        sample_rate = 16_000
-        channels = 1
-        dataset = self._create_audio_dataset(
-            'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
-            channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True)
-        assert len(dataset) == total_examples
-        assert dataset.sample_rate == sample_rate
-        assert dataset.channels == channels
-        for idx in range(len(dataset)):
-            sample, segment_info = dataset[idx]
-            assert sample.shape[0] == channels
-            assert sample.shape[1] == segment_info.total_frames
-            assert segment_info.sample_rate == sample_rate
-            assert segment_info.n_frames <= segment_info.total_frames
-
-    def test_dataset_collate_fn(self):
-        total_examples = 10
-        num_samples = 20
-        min_duration, max_duration = 1., 4.
-        segment_duration = 1.
-        sample_rate = 16_000
-        channels = 1
-        dataset = self._create_audio_dataset(
-            'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
-            channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=False)
-        batch_size = 4
-        dataloader = DataLoader(
-            dataset,
-            batch_size=batch_size,
-            num_workers=0
-        )
-        for idx, batch in enumerate(dataloader):
-            assert batch.shape[0] == batch_size
-
-    @pytest.mark.parametrize("segment_duration", [1.0, None])
-    def test_dataset_with_meta_collate_fn(self, segment_duration):
-        total_examples = 10
-        num_samples = 20
-        min_duration, max_duration = 1., 4.
-        sample_rate = 16_000
-        channels = 1
-        dataset = self._create_audio_dataset(
-            'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
-            channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True)
-        batch_size = 4
-        dataloader = DataLoader(
-            dataset,
-            batch_size=batch_size,
-            collate_fn=dataset.collater,
-            num_workers=0
-        )
-        for idx, batch in enumerate(dataloader):
-            wav, infos = batch
-            assert wav.shape[0] <= batch_size
-            assert len(infos) == wav.shape[0]
-
-    @pytest.mark.parametrize("segment_duration,sample_on_weight,sample_on_duration,a_hist,b_hist,c_hist", [
-        [1, True, True, 0.5, 0.5, 0.0],
-        [1, False, True, 0.25, 0.5, 0.25],
-        [1, True, False, 0.666, 0.333, 0.0],
-        [1, False, False, 0.333, 0.333, 0.333],
-        [None, False, False, 0.333, 0.333, 0.333]])
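-    # Expected frequencies: the sampling probability is proportional to weight and/or
-    # duration (a weight of None counts as 1); 'c' has weight 0, so it is never drawn
-    # when sampling on weight.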
-    def test_sample_with_weight(self, segment_duration, sample_on_weight, sample_on_duration, a_hist, b_hist, c_hist):
-        random.seed(1234)
-        rng = torch.Generator()
-        rng.manual_seed(1234)
-
-        def _get_histogram(dataset, repetitions=20_000):
-            counts = {file_meta.path: 0. for file_meta in meta}
-            for _ in range(repetitions):
-                file_meta = dataset.sample_file(rng)
-                counts[file_meta.path] += 1
-            return {name: count / repetitions for name, count in counts.items()}
-
-        meta = [
-            AudioMeta(path='a', duration=5, sample_rate=1, weight=2),
-            AudioMeta(path='b', duration=10, sample_rate=1, weight=None),
-            AudioMeta(path='c', duration=5, sample_rate=1, weight=0),
-        ]
-        dataset = AudioDataset(
-            meta, segment_duration=segment_duration, sample_on_weight=sample_on_weight,
-            sample_on_duration=sample_on_duration)
-        hist = _get_histogram(dataset)
-        assert math.isclose(hist['a'], a_hist, abs_tol=0.01)
-        assert math.isclose(hist['b'], b_hist, abs_tol=0.01)
-        assert math.isclose(hist['c'], c_hist, abs_tol=0.01)
-
-    def test_meta_duration_filter_all(self):
-        meta = [
-            AudioMeta(path='a', duration=5, sample_rate=1, weight=2),
-            AudioMeta(path='b', duration=10, sample_rate=1, weight=None),
-            AudioMeta(path='c', duration=5, sample_rate=1, weight=0),
-        ]
-        with pytest.raises(AssertionError):
-            AudioDataset(meta, segment_duration=11, min_segment_ratio=1)
-
-    def test_meta_duration_filter_long(self):
-        meta = [
-            AudioMeta(path='a', duration=5, sample_rate=1, weight=2),
-            AudioMeta(path='b', duration=10, sample_rate=1, weight=None),
-            AudioMeta(path='c', duration=5, sample_rate=1, weight=0),
-        ]
-        dataset = AudioDataset(meta, segment_duration=None, min_segment_ratio=1, max_audio_duration=7)
-        assert len(dataset) == 2
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/util.py DELETED
@@ -1,235 +0,0 @@
- # util.py
- import warnings
- import types
- import collections
- import itertools
- from functools import lru_cache
- from typing import List, Union, Iterable
-
- _bslash = chr(92)
-
-
- class __config_flags:
-     """Internal class for defining compatibility and debugging flags"""
-
-     _all_names: List[str] = []
-     _fixed_names: List[str] = []
-     _type_desc = "configuration"
-
-     @classmethod
-     def _set(cls, dname, value):
-         if dname in cls._fixed_names:
-             warnings.warn(
-                 "{}.{} {} is {} and cannot be overridden".format(
-                     cls.__name__,
-                     dname,
-                     cls._type_desc,
-                     str(getattr(cls, dname)).upper(),
-                 )
-             )
-             return
-         if dname in cls._all_names:
-             setattr(cls, dname, value)
-         else:
-             raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
-
-     enable = classmethod(lambda cls, name: cls._set(name, True))
-     disable = classmethod(lambda cls, name: cls._set(name, False))
-
-
- @lru_cache(maxsize=128)
- def col(loc: int, strg: str) -> int:
-     """
-     Returns current column within a string, counting newlines as line separators.
-     The first column is number 1.
-
-     Note: the default parsing behavior is to expand tabs in the input string
-     before starting the parsing process. See
-     :class:`ParserElement.parseString` for more
-     information on parsing strings containing ``<TAB>`` s, and suggested
-     methods to maintain a consistent view of the parsed string, the parse
-     location, and line and column positions within the parsed string.
-     """
-     s = strg
-     return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
-
-
- @lru_cache(maxsize=128)
- def lineno(loc: int, strg: str) -> int:
-     """Returns current line number within a string, counting newlines as line separators.
-     The first line is number 1.
-
-     Note - the default parsing behavior is to expand tabs in the input string
-     before starting the parsing process. See :class:`ParserElement.parseString`
-     for more information on parsing strings containing ``<TAB>`` s, and
-     suggested methods to maintain a consistent view of the parsed string, the
-     parse location, and line and column positions within the parsed string.
-     """
-     return strg.count("\n", 0, loc) + 1
-
-
- @lru_cache(maxsize=128)
- def line(loc: int, strg: str) -> str:
-     """
-     Returns the line of text containing loc within a string, counting newlines as line separators.
-     """
-     last_cr = strg.rfind("\n", 0, loc)
-     next_cr = strg.find("\n", loc)
-     return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
-
-
- class _UnboundedCache:
-     def __init__(self):
-         cache = {}
-         cache_get = cache.get
-         self.not_in_cache = not_in_cache = object()
-
-         def get(_, key):
-             return cache_get(key, not_in_cache)
-
-         def set_(_, key, value):
-             cache[key] = value
-
-         def clear(_):
-             cache.clear()
-
-         self.size = None
-         self.get = types.MethodType(get, self)
-         self.set = types.MethodType(set_, self)
-         self.clear = types.MethodType(clear, self)
-
-
- class _FifoCache:
-     def __init__(self, size):
-         self.not_in_cache = not_in_cache = object()
-         cache = collections.OrderedDict()
-         cache_get = cache.get
-
-         def get(_, key):
-             return cache_get(key, not_in_cache)
-
-         def set_(_, key, value):
-             cache[key] = value
-             while len(cache) > size:
-                 cache.popitem(last=False)
-
-         def clear(_):
-             cache.clear()
-
-         self.size = size
-         self.get = types.MethodType(get, self)
-         self.set = types.MethodType(set_, self)
-         self.clear = types.MethodType(clear, self)
-
-
- class LRUMemo:
-     """
-     A memoizing mapping that retains `capacity` deleted items
-
-     The memo tracks retained items by their access order; once `capacity` items
-     are retained, the least recently used item is discarded.
-     """
-
-     def __init__(self, capacity):
-         self._capacity = capacity
-         self._active = {}
-         self._memory = collections.OrderedDict()
-
-     def __getitem__(self, key):
-         try:
-             return self._active[key]
-         except KeyError:
-             self._memory.move_to_end(key)
-             return self._memory[key]
-
-     def __setitem__(self, key, value):
-         self._memory.pop(key, None)
-         self._active[key] = value
-
-     def __delitem__(self, key):
-         try:
-             value = self._active.pop(key)
-         except KeyError:
-             pass
-         else:
-             while len(self._memory) >= self._capacity:
-                 self._memory.popitem(last=False)
-             self._memory[key] = value
-
-     def clear(self):
-         self._active.clear()
-         self._memory.clear()
-
-
- class UnboundedMemo(dict):
-     """
-     A memoizing mapping that retains all deleted items
-     """
-
-     def __delitem__(self, key):
-         pass
-
-
- def _escape_regex_range_chars(s: str) -> str:
-     # escape these chars: ^-[]
-     for c in r"\^-[]":
-         s = s.replace(c, _bslash + c)
-     s = s.replace("\n", r"\n")
-     s = s.replace("\t", r"\t")
-     return str(s)
-
-
- def _collapse_string_to_ranges(
-     s: Union[str, Iterable[str]], re_escape: bool = True
- ) -> str:
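-     # is_consecutive assigns the same group key to each run of adjacent codepoints,
-     # so itertools.groupby below can split the sorted character set into contiguous ranges.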
-     def is_consecutive(c):
-         c_int = ord(c)
-         is_consecutive.prev, prev = c_int, is_consecutive.prev
-         if c_int - prev > 1:
-             is_consecutive.value = next(is_consecutive.counter)
-         return is_consecutive.value
-
-     is_consecutive.prev = 0
-     is_consecutive.counter = itertools.count()
-     is_consecutive.value = -1
-
-     def escape_re_range_char(c):
-         return "\\" + c if c in r"\^-][" else c
-
-     def no_escape_re_range_char(c):
-         return c
-
-     if not re_escape:
-         escape_re_range_char = no_escape_re_range_char
-
-     ret = []
-     s = "".join(sorted(set(s)))
-     if len(s) > 3:
-         for _, chars in itertools.groupby(s, key=is_consecutive):
-             first = last = next(chars)
-             last = collections.deque(
-                 itertools.chain(iter([last]), chars), maxlen=1
-             ).pop()
-             if first == last:
-                 ret.append(escape_re_range_char(first))
-             else:
-                 sep = "" if ord(last) == ord(first) + 1 else "-"
-                 ret.append(
-                     "{}{}{}".format(
-                         escape_re_range_char(first), sep, escape_re_range_char(last)
-                     )
-                 )
-     else:
-         ret = [escape_re_range_char(c) for c in s]
-
-     return "".join(ret)
-
-
- def _flatten(ll: list) -> list:
-     ret = []
-     for i in ll:
-         if isinstance(i, list):
-             ret.extend(_flatten(i))
-         else:
-             ret.append(i)
-     return ret
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/__init__.py DELETED
@@ -1,26 +0,0 @@
- __all__ = [
-     "__version__",
-     "AbstractProvider",
-     "AbstractResolver",
-     "BaseReporter",
-     "InconsistentCandidate",
-     "Resolver",
-     "RequirementsConflicted",
-     "ResolutionError",
-     "ResolutionImpossible",
-     "ResolutionTooDeep",
- ]
-
- __version__ = "1.0.1"
-
-
- from .providers import AbstractProvider, AbstractResolver
- from .reporters import BaseReporter
- from .resolvers import (
-     InconsistentCandidate,
-     RequirementsConflicted,
-     ResolutionError,
-     ResolutionImpossible,
-     ResolutionTooDeep,
-     Resolver,
- )
 
spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/version.py DELETED
@@ -1 +0,0 @@
- __version__ = "0.2.1"
 
 
spaces/AzinZ/vitscn/modules.py DELETED
@@ -1,390 +0,0 @@
- import copy
- import math
- import numpy as np
- import scipy
- import torch
- from torch import nn
- from torch.nn import functional as F
-
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
- from torch.nn.utils import weight_norm, remove_weight_norm
-
- import commons
- from commons import init_weights, get_padding
- from transforms import piecewise_rational_quadratic_transform
-
-
- LRELU_SLOPE = 0.1
-
-
- class LayerNorm(nn.Module):
-     def __init__(self, channels, eps=1e-5):
-         super().__init__()
-         self.channels = channels
-         self.eps = eps
-
-         self.gamma = nn.Parameter(torch.ones(channels))
-         self.beta = nn.Parameter(torch.zeros(channels))
-
-     def forward(self, x):
-         x = x.transpose(1, -1)
-         x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-         return x.transpose(1, -1)
-
-
- class ConvReluNorm(nn.Module):
-     def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-         super().__init__()
-         self.in_channels = in_channels
-         self.hidden_channels = hidden_channels
-         self.out_channels = out_channels
-         self.kernel_size = kernel_size
-         self.n_layers = n_layers
-         self.p_dropout = p_dropout
-         assert n_layers > 1, "Number of layers should be larger than 1."
-
-         self.conv_layers = nn.ModuleList()
-         self.norm_layers = nn.ModuleList()
-         self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
-         self.norm_layers.append(LayerNorm(hidden_channels))
-         self.relu_drop = nn.Sequential(
-             nn.ReLU(),
-             nn.Dropout(p_dropout))
-         for _ in range(n_layers - 1):
-             self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
-             self.norm_layers.append(LayerNorm(hidden_channels))
-         self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-         self.proj.weight.data.zero_()
-         self.proj.bias.data.zero_()
-
-     def forward(self, x, x_mask):
-         x_org = x
-         for i in range(self.n_layers):
-             x = self.conv_layers[i](x * x_mask)
-             x = self.norm_layers[i](x)
-             x = self.relu_drop(x)
-         x = x_org + self.proj(x)
-         return x * x_mask
-
-
- class DDSConv(nn.Module):
-     """
-     Dilated and Depth-Separable Convolution
-     """
-     def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-         super().__init__()
-         self.channels = channels
-         self.kernel_size = kernel_size
-         self.n_layers = n_layers
-         self.p_dropout = p_dropout
-
-         self.drop = nn.Dropout(p_dropout)
-         self.convs_sep = nn.ModuleList()
-         self.convs_1x1 = nn.ModuleList()
-         self.norms_1 = nn.ModuleList()
-         self.norms_2 = nn.ModuleList()
-         for i in range(n_layers):
-             dilation = kernel_size ** i
-             padding = (kernel_size * dilation - dilation) // 2
-             self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-                                             groups=channels, dilation=dilation, padding=padding
-                                             ))
-             self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-             self.norms_1.append(LayerNorm(channels))
-             self.norms_2.append(LayerNorm(channels))
-
-     def forward(self, x, x_mask, g=None):
-         if g is not None:
-             x = x + g
-         for i in range(self.n_layers):
-             y = self.convs_sep[i](x * x_mask)
-             y = self.norms_1[i](y)
-             y = F.gelu(y)
-             y = self.convs_1x1[i](y)
-             y = self.norms_2[i](y)
-             y = F.gelu(y)
-             y = self.drop(y)
-             x = x + y
-         return x * x_mask
-
-
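- # WN: non-causal WaveNet-style stack of dilated convolutions with gated
- # tanh/sigmoid activations and skip connections, as used throughout VITS.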
- class WN(torch.nn.Module):
-     def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-         super(WN, self).__init__()
-         assert kernel_size % 2 == 1
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.gin_channels = gin_channels
-         self.p_dropout = p_dropout
-
-         self.in_layers = torch.nn.ModuleList()
-         self.res_skip_layers = torch.nn.ModuleList()
-         self.drop = nn.Dropout(p_dropout)
-
-         if gin_channels != 0:
-             cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
-             self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-         for i in range(n_layers):
-             dilation = dilation_rate ** i
-             padding = int((kernel_size * dilation - dilation) / 2)
-             in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
-                                        dilation=dilation, padding=padding)
-             in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-             self.in_layers.append(in_layer)
-
-             # last one is not necessary
-             if i < n_layers - 1:
-                 res_skip_channels = 2 * hidden_channels
-             else:
-                 res_skip_channels = hidden_channels
-
-             res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-             res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-             self.res_skip_layers.append(res_skip_layer)
-
-     def forward(self, x, x_mask, g=None, **kwargs):
-         output = torch.zeros_like(x)
-         n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
-         if g is not None:
-             g = self.cond_layer(g)
-
-         for i in range(self.n_layers):
-             x_in = self.in_layers[i](x)
-             if g is not None:
-                 cond_offset = i * 2 * self.hidden_channels
-                 g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
-             else:
-                 g_l = torch.zeros_like(x_in)
-
-             acts = commons.fused_add_tanh_sigmoid_multiply(
-                 x_in,
-                 g_l,
-                 n_channels_tensor)
-             acts = self.drop(acts)
-
-             res_skip_acts = self.res_skip_layers[i](acts)
-             if i < self.n_layers - 1:
-                 res_acts = res_skip_acts[:, :self.hidden_channels, :]
-                 x = (x + res_acts) * x_mask
-                 output = output + res_skip_acts[:, self.hidden_channels:, :]
-             else:
-                 output = output + res_skip_acts
-         return output * x_mask
-
-     def remove_weight_norm(self):
-         if self.gin_channels != 0:
-             torch.nn.utils.remove_weight_norm(self.cond_layer)
-         for l in self.in_layers:
-             torch.nn.utils.remove_weight_norm(l)
-         for l in self.res_skip_layers:
-             torch.nn.utils.remove_weight_norm(l)
-
-
- class ResBlock1(torch.nn.Module):
-     def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-         super(ResBlock1, self).__init__()
-         self.convs1 = nn.ModuleList([
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                                padding=get_padding(kernel_size, dilation[0]))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                                padding=get_padding(kernel_size, dilation[1]))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
-                                padding=get_padding(kernel_size, dilation[2])))
-         ])
-         self.convs1.apply(init_weights)
-
-         self.convs2 = nn.ModuleList([
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                                padding=get_padding(kernel_size, 1))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                                padding=get_padding(kernel_size, 1))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                                padding=get_padding(kernel_size, 1)))
-         ])
-         self.convs2.apply(init_weights)
-
-     def forward(self, x, x_mask=None):
-         for c1, c2 in zip(self.convs1, self.convs2):
-             xt = F.leaky_relu(x, LRELU_SLOPE)
-             if x_mask is not None:
-                 xt = xt * x_mask
-             xt = c1(xt)
-             xt = F.leaky_relu(xt, LRELU_SLOPE)
-             if x_mask is not None:
-                 xt = xt * x_mask
-             xt = c2(xt)
-             x = xt + x
-         if x_mask is not None:
-             x = x * x_mask
-         return x
-
-     def remove_weight_norm(self):
-         for l in self.convs1:
-             remove_weight_norm(l)
-         for l in self.convs2:
-             remove_weight_norm(l)
-
-
- class ResBlock2(torch.nn.Module):
-     def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-         super(ResBlock2, self).__init__()
-         self.convs = nn.ModuleList([
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                                padding=get_padding(kernel_size, dilation[0]))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                                padding=get_padding(kernel_size, dilation[1])))
-         ])
-         self.convs.apply(init_weights)
-
-     def forward(self, x, x_mask=None):
-         for c in self.convs:
-             xt = F.leaky_relu(x, LRELU_SLOPE)
-             if x_mask is not None:
-                 xt = xt * x_mask
-             xt = c(xt)
-             x = xt + x
-         if x_mask is not None:
-             x = x * x_mask
-         return x
-
-     def remove_weight_norm(self):
-         for l in self.convs:
-             remove_weight_norm(l)
-
-
- class Log(nn.Module):
-     def forward(self, x, x_mask, reverse=False, **kwargs):
-         if not reverse:
-             y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-             logdet = torch.sum(-y, [1, 2])
-             return y, logdet
-         else:
-             x = torch.exp(x) * x_mask
-             return x
-
-
- class Flip(nn.Module):
-     def forward(self, x, *args, reverse=False, **kwargs):
-         x = torch.flip(x, [1])
-         if not reverse:
-             logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-             return x, logdet
-         else:
-             return x
-
-
- class ElementwiseAffine(nn.Module):
-     def __init__(self, channels):
-         super().__init__()
-         self.channels = channels
-         self.m = nn.Parameter(torch.zeros(channels, 1))
-         self.logs = nn.Parameter(torch.zeros(channels, 1))
-
-     def forward(self, x, x_mask, reverse=False, **kwargs):
-         if not reverse:
-             y = self.m + torch.exp(self.logs) * x
-             y = y * x_mask
-             logdet = torch.sum(self.logs * x_mask, [1, 2])
-             return y, logdet
-         else:
-             x = (x - self.m) * torch.exp(-self.logs) * x_mask
-             return x
-
-
- class ResidualCouplingLayer(nn.Module):
-     def __init__(self,
-                  channels,
-                  hidden_channels,
-                  kernel_size,
-                  dilation_rate,
-                  n_layers,
-                  p_dropout=0,
-                  gin_channels=0,
-                  mean_only=False):
-         assert channels % 2 == 0, "channels should be divisible by 2"
-         super().__init__()
-         self.channels = channels
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.half_channels = channels // 2
-         self.mean_only = mean_only
-
-         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-         self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
-         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-         self.post.weight.data.zero_()
-         self.post.bias.data.zero_()
-
-     def forward(self, x, x_mask, g=None, reverse=False):
-         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
-         h = self.pre(x0) * x_mask
-         h = self.enc(h, x_mask, g=g)
-         stats = self.post(h) * x_mask
-         if not self.mean_only:
-             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
-         else:
-             m = stats
-             logs = torch.zeros_like(m)
-
-         if not reverse:
-             x1 = m + x1 * torch.exp(logs) * x_mask
-             x = torch.cat([x0, x1], 1)
-             logdet = torch.sum(logs, [1, 2])
-             return x, logdet
-         else:
-             x1 = (x1 - m) * torch.exp(-logs) * x_mask
-             x = torch.cat([x0, x1], 1)
-             return x
-
-
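- # ConvFlow: coupling layer that predicts monotonic rational-quadratic spline
- # parameters for half of the channels (the neural spline flow used in VITS's
- # stochastic duration predictor).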
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
348
- super().__init__()
349
- self.in_channels = in_channels
350
- self.filter_channels = filter_channels
351
- self.kernel_size = kernel_size
352
- self.n_layers = n_layers
353
- self.num_bins = num_bins
354
- self.tail_bound = tail_bound
355
- self.half_channels = in_channels // 2
356
-
357
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
358
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
359
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
360
- self.proj.weight.data.zero_()
361
- self.proj.bias.data.zero_()
362
-
363
- def forward(self, x, x_mask, g=None, reverse=False):
364
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
365
- h = self.pre(x0)
366
- h = self.convs(h, x_mask, g=g)
367
- h = self.proj(h) * x_mask
368
-
369
- b, c, t = x0.shape
370
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
371
-
372
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
373
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
374
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
375
-
376
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
377
- unnormalized_widths,
378
- unnormalized_heights,
379
- unnormalized_derivatives,
380
- inverse=reverse,
381
- tails='linear',
382
- tail_bound=self.tail_bound
383
- )
384
-
385
- x = torch.cat([x0, x1], 1) * x_mask
386
- logdet = torch.sum(logabsdet * x_mask, [1,2])
387
- if not reverse:
388
- return x, logdet
389
- else:
390
- return x
 
spaces/Benson/text-generation/Examples/Cmo Descargar Fifa 2022 Apk.md DELETED
@@ -1,45 +0,0 @@
- <br />
- <h1>How to Download FIFA 2022 APK</h1>
- <p>If you are a fan of football games, you may be wondering how to download the FIFA 2022 APK on your Android device. FIFA 2022 is the latest installment in the popular FIFA series, developed by EA Sports. It is one of the most anticipated games of the year, with new gameplay features, modes, graphics, and more. In this article, we will show you how to download the FIFA 2022 APK safely and easily, along with some of the game's best features and tips for playing it.</p>
- <h2>how to download fifa 2022 apk</h2><br /><p><b><b>Download File</b> ---> <a href="https://bltlly.com/2v6KiU">https://bltlly.com/2v6KiU</a></b></p><br /><br />
- <h2>FIFA 2022 Features</h2>
- <p>FIFA 2022 is not just a re-skin of its predecessor. It is a completely new game that brings every match in every mode even closer to the real thing. Here are some of the features that set FIFA 2022 apart from other football games:</p>
- <ul>
- <li><b>HyperMotion technology:</b> This is a new gameplay technology that uses real-world data and machine learning to create realistic animations, movements, and reactions for every player on the pitch. It also enables advanced team tactics and formations that adapt to the situation.</li>
- <li><b>World Cup mode:</b> This is the only licensed FIFA World Cup 2022 mobile game, letting you replay the official tournament with any of the 32 qualified nations. You can also play in authentic World Cup stadiums, kits, badges, and balls, with localized commentary.</li>
- <li><b>Icons and Heroes:</b> You can build your ultimate team with over 100 football icons and heroes, including legends such as Paolo Maldini, Ronaldinho, Kylian Mbappé, Christian Pulisic, Vinicius Jr, and Son Heung-min. You can also level up your team from fan favorite to UEFA Champions League contender.</li>
- <li><b>Next-level football simulation:</b> You can experience new and improved football stadiums with realistic sounds and visuals, at up to 60 fps on supported devices. You can also enjoy real-time 11v11 gameplay, with authentic football action and physics.</li>
-
- </ul>
- <h2>FIFA 2022 System Requirements</h2>
- <p>Before downloading the FIFA 2022 APK, you need to make sure your Android device meets the minimum or recommended system requirements for the game. Here are the system requirements for the FIFA 2022 APK:</p>
- <table>
- <tr><th>Minimum requirements</th><th>Recommended requirements</th></tr>
- <tr><td>OS: 64-bit Android 6.0 or higher<br>Processor: Athlon X4 880K @4GHz or equivalent<br>Memory: 8 GB<br>Graphics card: Radeon HD 7850 or equivalent<br>Free disk space: 50 GB</td><td>OS: 64-bit Android 8.0 or higher<br>Processor: FX-8150 @3.6GHz or equivalent<br>Memory: 8 GB<br>Graphics card: Radeon R9 270x or equivalent<br>Free disk space: 50 GB</td></tr>
- </table>
- <h2>FIFA 2022 Download Tips</h2>
- <p>Now that you know what the FIFA 2022 APK is and what features and system requirements it has, you may be eager to download it to your Android device. However, there are a few things to be careful about when downloading an APK file from the Internet. Here are some tips to help you download the FIFA 2022 APK safely and easily:</p>
- <ol>
- <li><b>Find a reliable source:</b> Not all APK files are safe to download and install on your device. Some of them may contain malware, viruses, or other harmful software that can damage your device or steal your personal information. Therefore, you should always download APK files from trusted, reputable sources, such as the official EA Sports website, the Google Play Store, or other verified third-party websites. You can also check the reviews and ratings of the APK file before downloading it to see whether other users have had problems with it.</li>
-
- <li><b>Avoid common errors:</b> Sometimes you may run into errors or problems while downloading or installing an APK file. For example, you may get a message saying that the app is not compatible with your device, that there is not enough storage space on your device, or that the app has stopped working. To avoid these errors, make sure that your device meets the system requirements for the FIFA 2022 APK, that you have enough free disk space on your device, and that you have updated your device's software to the latest version. You should also clear the app's cache and data if it crashes or freezes.</li>
- </ol>
- <h2>FIFA 2022 FAQs</h2>
- <p>Here are some of the most frequently asked questions about the FIFA 2022 APK:</p>
- <p></p>
- <dl>
- <dt>Q: Is the FIFA 2022 APK free to download and play?</dt>
- <dd>A: Yes, the FIFA 2022 APK is free to download and play on your Android device. However, some game features and items may require purchases with real money.</dd>
- <dt>Q: Do I need an Internet connection to play the FIFA 2022 APK?</dt>
- <dd>A: Yes, you need a stable Internet connection to play the FIFA 2022 APK online with other players or to access some of the game modes and features.</dd>
- <dt>Q: How do I update the FIFA 2022 APK to the latest version?</dt>
- <dd>A: You can update the FIFA 2022 APK to the latest version by downloading and installing the new APK file from the same source you downloaded it from. Alternatively, you can enable automatic updates in your device settings or in the app settings.</dd>
- <dt>Q: How can I transfer my FIFA 2022 progress and data from one device to another?</dt>
- <dd>A: You can transfer your FIFA 2022 progress and data from one device to another by signing in with the same EA account on both devices. You can also use the cloud storage or backup options in the app settings.</dd>
- <dt>Q: How can I contact EA Sports for support or feedback about the FIFA 2022 APK?</dt>
-
- </dl>
- <h1>Conclusion</h1>
- <p>The FIFA 2022 APK is a must-have game for any football fan who wants to enjoy a realistic, immersive football simulation on their Android device. It offers new and improved gameplay features, modes, graphics, and more that will keep you hooked for hours. To download the FIFA 2022 APK safely and easily, follow the simple tips and tricks we have shared in this article. We hope this article has helped you learn how to download the FIFA 2022 APK and enjoy playing it on your device. If you have any questions or comments, feel free to leave them below.</p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Fifa Street 4 Pc Bagas31.md DELETED
@@ -1,155 +0,0 @@
-
- <h1>Download FIFA Street 4 PC Bagas31: A Guide to Enjoying Street Football on Your Computer</h1>
- <p>Do you love football but want to experience it in a different way? Do you want to show off your skills and tricks at street venues around the world? Do you want to play with your favorite players and teams in a fun, arcade-style game? If you answered yes to any of these questions, then you should try FIFA Street 4, a game that will let you enjoy street football on your computer.</p>
- <h2>download fifa street 4 pc bagas31</h2><br /><p><b><b>DOWNLOAD</b> &middot;&middot;&middot; <a href="https://bltlly.com/2v6JFm">https://bltlly.com/2v6JFm</a></b></p><br /><br />
- <h2>What is FIFA Street 4?</h2>
- <p>FIFA Street 4 is a game developed by EA Sports and released in 2012 for PlayStation 3 and Xbox 360. It is the fourth installment in the FIFA Street series, which focuses on street football rather than traditional football. The game features more than 50 different venues, from Rio de Janeiro to London, where you can play under various modes and rules. You can also customize your own team and player, and unlock new items and skills as you progress.</p>
- <p>The game has several features that make it unique and exciting, such as:</p>
- <ul>
- <li>The street ball control system, which lets you perform more realistic and fluid dribbling, passing, and shooting.</li>
- <li>World Tour mode, which lets you create your own team and compete in tournaments around the world.</li>
- <li>Online Seasons mode, which lets you play online matches against other players and rank up through divisions.</li>
- <li>Street Network mode, which lets you share your achievements, videos, and photos with your friends and rivals.</li>
- <li>Legendary Players mode, which lets you play with or against some of the best footballers of all time, such as Pelé, Zidane, or Messi.</li>
- </ul>
- <h2>Why download FIFA Street 4 PC Bagas31?</h2>
-
- <p>By downloading FIFA Street 4 PC Bagas31, you can enjoy several benefits, such as:</p>
- <ul>
- <li>You can play FIFA Street 4 on your PC without needing a console or an emulator.</li>
- <li>You can save money by not buying a console or a game disc.</li>
- <li>You can play FIFA Street 4 with better graphics and performance than on a console.</li>
- <li>You can use a keyboard and mouse or a controller to play FIFA Street 4.</li>
- <li>You can access all of FIFA Street 4's features and modes without restrictions or limitations.</li>
- </ul>
- <h2>How to download FIFA Street 4 PC Bagas31?</h2>
- <p>Downloading FIFA Street 4 PC Bagas31 is quick and simple. Just follow these steps:</p>
- <p></p>
- <ol>
- <li>Go to [1](https://www.alvindayu.com/2018/02/download-fifa-street-4-pc-full-version.html) and click the download link at the bottom of the page.</li>
- <li>Wait for the download to finish. The file is about 4.8 GB, so it may take a while depending on your Internet speed.</li>
- <li>Extract the downloaded file using WinRAR or 7-Zip. You will get a folder called FIFA Street 4 PC Bagas31.</li>
- <li>Open the folder and run the setup.exe file as administrator. Follow the on-screen instructions to install the game.</li>
- <li>Once the installation is complete, launch the game from the desktop shortcut or the Start menu.</li>
- <li>Enjoy playing FIFA Street 4 PC Bagas31!</li>
- </ol>
- <h2>How to play FIFA Street 4 PC Bagas31?</h2>
- <p>Playing FIFA Street 4 PC Bagas31 is similar to playing any other football game on PC. You can use a keyboard and mouse or a controller to control your players and perform various actions. Here is a quick overview of the gameplay and controls:</p>
-
- <p>The controls of FIFA Street 4 PC Bagas31 are based on the street ball control system, which lets you perform more realistic and fluid dribbling, passing, and shooting. You can also use tricks and skills to beat your opponents and score spectacular goals. Here is a table of the basic controls for keyboard and mouse and for a controller:</p>
- <table>
- <tr>
- <th>Action</th>
- <th>Keyboard and mouse</th>
- <th>Controller</th>
- </tr>
- <tr>
- <td>Move</td>
- <td>WASD</td>
- <td>Left stick</td>
- </tr>
- <tr>
- <td>Sprint</td>
- <td>Shift</td>
- <td>Right trigger</td>
- </tr>
- <tr>
- <td>Pass</td>
- <td>Left click</td>
- <td>A</td>
- </tr>
- <tr>
- <td>Shoot</td>
- <td>Right click</td>
- <td>B</td>
- </tr>
- <tr>
- <td>Lob pass/Cross</td>
- <td>E</td>
- <td>X</td>
- </tr>
- <tr>
- <td>Through ball</td>
- <td>R</td>
- <td>Y</td>
- </tr>
- <tr>
- <td>Skill move/Trick</td>
- <td>Q or F or mouse wheel</td>
- <td>Right stick or left or right bumper</td>
- </tr>
- <tr>
- <td>Tackle/Slide tackle</td>
- <td>Space or C</td>
- <td>A or X</td>
- </tr>
- <tr>
- <td>Jockey/Contain/Teammate contain</td>
- <td>Z or X or V or B or N or M or Alt or Ctrl or Shift or Tab or Caps Lock or Enter or Backspace or Delete or Insert or Home or End or Page Down or arrow keys (any key except WASD, Q, R, F, Esc)</td>
- <td>Left or right trigger or left or right bumper (any button except Left stick, Right stick, A, B, X, Y)</td>
- </tr>
- <tr><td colspan="3">Note: You can change the controls in the game settings if you prefer.</td></tr>
- </table>
- <h2>What are the system requirements for FIFA Street 4 PC Bagas31?</h2>
- <p>Before downloading and playing FIFA Street 4 PC Bagas31, you should make sure your computer meets the minimum or recommended system requirements to run the game. Here is a list of the specs you need:</p>
- <table>
- <tr>
- <th>Minimum requirements</th>
- <th>Recommended requirements</th>
- </tr>
- <tr>
- <td>OS: Windows 7/8/10 (64-bit)</td>
- <td>OS: Windows 10 (64-bit)</td>
- </tr>
- <tr>
- <td>CPU: Intel Core 2 Duo E6600 or AMD Athlon 64 X2 5400+</td>
- <td>CPU: Intel Core i3-2100 or AMD Phenom II X4 965</td>
- </tr>
- <tr>
- <td>RAM: 4 GB</td>
- <td>RAM: 8 GB</td>
- </tr>
- <tr>
- <td>GPU: NVIDIA GeForce GTX 460 or AMD Radeon HD 5770</td>
- <td>GPU: NVIDIA GeForce GTX 660 or AMD Radeon HD 7850</td>
- </tr>
- <tr>
- <td>DirectX: Version 11</td>
- <td>DirectX: Version 11</td>
- </tr>
- <tr>
- <td>Storage: 10 GB of available space</td>
- <td>Storage: 10 GB of available space</td>
- </tr>
- <tr><td colspan="2">Note: These are estimated system requirements based on the original console version of FIFA Street 4. Actual requirements may vary depending on your PC configuration and the modifications made by Bagas31.</td></tr>
- </table>
- <h2>How to improve the performance of FIFA Street 4 PC Bagas31?</h2>
- <p>If you run into problems while playing FIFA Street 4 PC Bagas31, such as lag, stuttering, crashes, or errors, you can try some of these tips and tricks to improve the game's performance and fix common issues:</p>
- <ul>
- <li>Update your drivers and Windows to the latest version.</li>
- <li>Run the game as administrator and in compatibility mode for Windows 7.</li>
- <li>Disable any antivirus or firewall programs that may interfere with the game.</li>
- <li>Close any unnecessary background programs that may be consuming your CPU, RAM, or bandwidth.</li>
- <li>Lower the game settings and resolution to match your PC's capabilities.</li>
- <li>Enable V-sync and cap the frame rate to prevent screen tearing and overheating.</li>
- <li>Delete any corrupted or outdated files in the game folder and reinstall the game if necessary.</li>
- <li>Search online for solutions or patches specific to your particular problem.</li>
- </ul>
- <h2>What are some alternatives to FIFA Street 4 PC Bagas31?</h2>
- <p>If you are looking for other street football games or emulators you can play on your PC, you can check out some of these alternatives to FIFA Street 4 PC Bagas31:</p>
- <ul>
- <li>FIFA Street (2012) PS3 emulator: This is the original version of FIFA Street 4, which you can play on your PC using a PS3 emulator such as RPCS3. You will need a PS3 game disc or ISO file, a PS3 BIOS file, and a powerful PC to run the emulator smoothly. You can find more information and instructions on how to use RPCS3 [here].</li>
- <li>FIFA Street 2 (2006) PCSX2 emulator: This is the second installment in the FIFA Street series, which you can play on your PC using a PS2 emulator such as PCSX2. You will need a PS2 game disc or ISO file, a PS2 BIOS file, and a decent PC to run the emulator properly. You can find more information and instructions on how to use PCSX2 [here].</li>
- <li>FIFA Street 3 (2008) Xbox 360 emulator: This is the third installment in the FIFA Street series, which you can play on your PC using an Xbox 360 emulator such as Xenia. You will need an Xbox 360 game disc or ISO file, an Xbox 360 BIOS file, and a powerful PC to run the emulator smoothly. You can find more information and instructions on how to use Xenia [here].</li>
- <li>NBA Street Homecourt (2007) Xbox 360 emulator: This is a basketball game with gameplay and style similar to FIFA Street. You can play it on your PC using an Xbox 360 emulator such as Xenia. You will need an Xbox 360 game disc or ISO file, an Xbox 360 BIOS file, and a powerful PC to run the emulator smoothly. You can find more information and instructions on how to use Xenia [here].</li>
- <li>Inazuma Eleven Strikers (2012) Dolphin emulator: This is a football game with a mix of arcade and RPG elements. You can play it on your PC using a Wii emulator such as Dolphin. You will need a Wii game disc or ISO file, a Wii BIOS file, and a decent PC to run the emulator properly. You can find more information and instructions on how to use Dolphin [here].</li>
- </ul>
- <h2>Conclusion</h2>
-
- <p>So, what are you waiting for? Download FIFA Street 4 PC Bagas31 today and have fun playing street football on your computer!</p>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some frequently asked questions about FIFA Street 4 PC Bagas31:</p>
- <ol>
- <li>Is FIFA Street 4 PC Bagas31 safe to download?</li>
- <p>Yes, FIFA Street 4 PC Bagas31 is safe to download from the Bagas31 website. However, you should always scan downloaded files with an antivirus program before opening them, and be careful with any pop-ups or ads that may appear on the website.</p>
- <li>Is FIFA Street 4 PC Bagas31 legal to download?</li>
- <p>No, FIFA Street 4 PC Bagas31 is not legal to download, as it is a modified version of a copyrighted game that was never officially released for PC. Downloading and playing FIFA Street 4 PC Bagas31 is at your own risk, and you may face legal consequences if caught by the authorities.</p>
- <li>Can I play FIFA Street 4 PC Bagas31 offline?</li>
- <p>Yes, you can play FIFA Street 4 PC Bagas31 without an Internet connection. However, you will not be able to access Online Seasons mode or Street Network mode, which require an online connection.</p>
- <li>Can I play FIFA Street 4 PC Bagas31 with my friends?</li>
- <p>Yes, you can play FIFA Street 4 PC Bagas31 with your friends online or locally. To play online, you will need an Internet connection and an account on EA's servers. To play locally, you will need two controllers or keyboards and mice, and the split-screen option in the game settings.</p>
- <li>Can I update FIFA Street 4 PC Bagas31?</li>
- <p>No, you cannot update FIFA Street 4 PC Bagas31, as it is a modified version of the game that does not receive any official updates or patches from EA Sports. However, you can find unofficial updates or mods from other sources online that may improve or change the game in some way.</p>
- </ol></p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/ec2/__init__.py DELETED
@@ -1,12 +0,0 @@
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- # https://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/scheme.py DELETED
@@ -1,31 +0,0 @@
- """
- For types associated with installation schemes.
-
- For a general overview of available schemes and their context, see
- https://docs.python.org/3/install/index.html#alternate-installation.
- """
-
-
- SCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"]
-
-
- class Scheme:
-     """A Scheme holds paths which are used as the base directories for
-     artifacts associated with a Python package.
-     """
-
-     __slots__ = SCHEME_KEYS
-
-     def __init__(
-         self,
-         platlib: str,
-         purelib: str,
-         headers: str,
-         scripts: str,
-         data: str,
-     ) -> None:
-         self.platlib = platlib
-         self.purelib = purelib
-         self.headers = headers
-         self.scripts = scripts
-         self.data = data
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/database.py DELETED
@@ -1,1350 +0,0 @@
- # -*- coding: utf-8 -*-
- #
- # Copyright (C) 2012-2017 The Python Software Foundation.
- # See LICENSE.txt and CONTRIBUTORS.txt.
- #
- """PEP 376 implementation."""
-
- from __future__ import unicode_literals
-
- import base64
- import codecs
- import contextlib
- import hashlib
- import logging
- import os
- import posixpath
- import sys
- import zipimport
-
- from . import DistlibException, resources
- from .compat import StringIO
- from .version import get_scheme, UnsupportedVersionError
- from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
-                        LEGACY_METADATA_FILENAME)
- from .util import (parse_requirement, cached_property, parse_name_and_version,
-                    read_exports, write_exports, CSVReader, CSVWriter)
-
-
- __all__ = ['Distribution', 'BaseInstalledDistribution',
-            'InstalledDistribution', 'EggInfoDistribution',
-            'DistributionPath']
-
-
- logger = logging.getLogger(__name__)
-
- EXPORTS_FILENAME = 'pydist-exports.json'
- COMMANDS_FILENAME = 'pydist-commands.json'
-
- DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
-               'RESOURCES', EXPORTS_FILENAME, 'SHARED')
-
- DISTINFO_EXT = '.dist-info'
-
-
- class _Cache(object):
-     """
-     A simple cache mapping names and .dist-info paths to distributions
-     """
-     def __init__(self):
-         """
-         Initialise an instance. There is normally one for each DistributionPath.
-         """
-         self.name = {}
-         self.path = {}
-         self.generated = False
-
-     def clear(self):
-         """
-         Clear the cache, setting it to its initial state.
-         """
-         self.name.clear()
-         self.path.clear()
-         self.generated = False
-
-     def add(self, dist):
-         """
-         Add a distribution to the cache.
-         :param dist: The distribution to add.
-         """
-         if dist.path not in self.path:
-             self.path[dist.path] = dist
-             self.name.setdefault(dist.key, []).append(dist)
-
-
- class DistributionPath(object):
-     """
-     Represents a set of distributions installed on a path (typically sys.path).
-     """
-     def __init__(self, path=None, include_egg=False):
-         """
-         Create an instance from a path, optionally including legacy (distutils/
-         setuptools/distribute) distributions.
-         :param path: The path to use, as a list of directories. If not specified,
-                      sys.path is used.
-         :param include_egg: If True, this instance will look for and return legacy
-                             distributions as well as those based on PEP 376.
-         """
-         if path is None:
-             path = sys.path
-         self.path = path
-         self._include_dist = True
-         self._include_egg = include_egg
-
-         self._cache = _Cache()
-         self._cache_egg = _Cache()
-         self._cache_enabled = True
-         self._scheme = get_scheme('default')
-
-     def _get_cache_enabled(self):
-         return self._cache_enabled
-
-     def _set_cache_enabled(self, value):
-         self._cache_enabled = value
-
-     cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
-
-     def clear_cache(self):
-         """
-         Clears the internal cache.
-         """
-         self._cache.clear()
-         self._cache_egg.clear()
-
-     def _yield_distributions(self):
-         """
-         Yield .dist-info and/or .egg(-info) distributions.
-         """
-         # We need to check if we've seen some resources already, because on
-         # some Linux systems (e.g. some Debian/Ubuntu variants) there are
-         # symlinks which alias other files in the environment.
-         seen = set()
-         for path in self.path:
-             finder = resources.finder_for_path(path)
-             if finder is None:
-                 continue
-             r = finder.find('')
-             if not r or not r.is_container:
-                 continue
-             rset = sorted(r.resources)
-             for entry in rset:
-                 r = finder.find(entry)
-                 if not r or r.path in seen:
-                     continue
-                 try:
-                     if self._include_dist and entry.endswith(DISTINFO_EXT):
-                         possible_filenames = [METADATA_FILENAME,
-                                               WHEEL_METADATA_FILENAME,
-                                               LEGACY_METADATA_FILENAME]
-                         for metadata_filename in possible_filenames:
-                             metadata_path = posixpath.join(entry, metadata_filename)
-                             pydist = finder.find(metadata_path)
-                             if pydist:
-                                 break
-                         else:
-                             continue
-
-                         with contextlib.closing(pydist.as_stream()) as stream:
-                             metadata = Metadata(fileobj=stream, scheme='legacy')
-                         logger.debug('Found %s', r.path)
-                         seen.add(r.path)
-                         yield new_dist_class(r.path, metadata=metadata,
-                                              env=self)
-                     elif self._include_egg and entry.endswith(('.egg-info',
-                                                                '.egg')):
-                         logger.debug('Found %s', r.path)
-                         seen.add(r.path)
-                         yield old_dist_class(r.path, self)
-                 except Exception as e:
-                     msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s'
-                     logger.warning(msg, r.path, e)
-                     import warnings
-                     warnings.warn(msg % (r.path, e), stacklevel=2)
-
-     def _generate_cache(self):
-         """
-         Scan the path for distributions and populate the cache with
-         those that are found.
-         """
-         gen_dist = not self._cache.generated
-         gen_egg = self._include_egg and not self._cache_egg.generated
-         if gen_dist or gen_egg:
-             for dist in self._yield_distributions():
-                 if isinstance(dist, InstalledDistribution):
-                     self._cache.add(dist)
-                 else:
-                     self._cache_egg.add(dist)
-
-             if gen_dist:
-                 self._cache.generated = True
-             if gen_egg:
-                 self._cache_egg.generated = True
-
-     @classmethod
-     def distinfo_dirname(cls, name, version):
-         """
-         The *name* and *version* parameters are converted into their
-         filename-escaped form, i.e. any ``'-'`` characters are replaced
-         with ``'_'`` other than the one in ``'dist-info'`` and the one
-         separating the name from the version number.
-
-         :parameter name: is converted to a standard distribution name by replacing
-                          any runs of non- alphanumeric characters with a single
-                          ``'-'``.
-         :type name: string
-         :parameter version: is converted to a standard version string. Spaces
-                             become dots, and all other non-alphanumeric characters
-                             (except dots) become dashes, with runs of multiple
-                             dashes condensed to a single dash.
-         :type version: string
-         :returns: directory name
-         :rtype: string"""
-         name = name.replace('-', '_')
-         return '-'.join([name, version]) + DISTINFO_EXT
-
-     def get_distributions(self):
-         """
-         Provides an iterator that looks for distributions and returns
-         :class:`InstalledDistribution` or
-         :class:`EggInfoDistribution` instances for each one of them.
-
-         :rtype: iterator of :class:`InstalledDistribution` and
-                 :class:`EggInfoDistribution` instances
-         """
-         if not self._cache_enabled:
-             for dist in self._yield_distributions():
-                 yield dist
-         else:
-             self._generate_cache()
-
-             for dist in self._cache.path.values():
-                 yield dist
-
-             if self._include_egg:
-                 for dist in self._cache_egg.path.values():
-                     yield dist
-
-     def get_distribution(self, name):
-         """
-         Looks for a named distribution on the path.
-
-         This function only returns the first result found, as no more than one
-         value is expected. If nothing is found, ``None`` is returned.
-
-         :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
-                 or ``None``
-         """
-         result = None
-         name = name.lower()
-         if not self._cache_enabled:
-             for dist in self._yield_distributions():
-                 if dist.key == name:
-                     result = dist
-                     break
-         else:
-             self._generate_cache()
-
-             if name in self._cache.name:
-                 result = self._cache.name[name][0]
-             elif self._include_egg and name in self._cache_egg.name:
-                 result = self._cache_egg.name[name][0]
-         return result
-
-     def provides_distribution(self, name, version=None):
-         """
-         Iterates over all distributions to find which distributions provide *name*.
-         If a *version* is provided, it will be used to filter the results.
-
-         This function only returns the first result found, since no more than
-         one values are expected. If the directory is not found, returns ``None``.
-
-         :parameter version: a version specifier that indicates the version
-                             required, conforming to the format in ``PEP-345``
-
-         :type name: string
-         :type version: string
267
- """
268
- matcher = None
269
- if version is not None:
270
- try:
271
- matcher = self._scheme.matcher('%s (%s)' % (name, version))
272
- except ValueError:
273
- raise DistlibException('invalid name or version: %r, %r' %
274
- (name, version))
275
-
276
- for dist in self.get_distributions():
277
- # We hit a problem on Travis where enum34 was installed and doesn't
278
- # have a provides attribute ...
279
- if not hasattr(dist, 'provides'):
280
- logger.debug('No "provides": %s', dist)
281
- else:
282
- provided = dist.provides
283
-
284
- for p in provided:
285
- p_name, p_ver = parse_name_and_version(p)
286
- if matcher is None:
287
- if p_name == name:
288
- yield dist
289
- break
290
- else:
291
- if p_name == name and matcher.match(p_ver):
292
- yield dist
293
- break
294
-
295
- def get_file_path(self, name, relative_path):
296
- """
297
- Return the path to a resource file.
298
- """
299
- dist = self.get_distribution(name)
300
- if dist is None:
301
- raise LookupError('no distribution named %r found' % name)
302
- return dist.get_resource_path(relative_path)
303
-
304
- def get_exported_entries(self, category, name=None):
305
- """
306
- Return all of the exported entries in a particular category.
307
-
308
- :param category: The category to search for entries.
309
- :param name: If specified, only entries with that name are returned.
310
- """
311
- for dist in self.get_distributions():
312
- r = dist.exports
313
- if category in r:
314
- d = r[category]
315
- if name is not None:
316
- if name in d:
317
- yield d[name]
318
- else:
319
- for v in d.values():
320
- yield v
321
-
322
-
323
- class Distribution(object):
324
- """
325
- A base class for distributions, whether installed or from indexes.
326
- Either way, it must have some metadata, so that's all that's needed
327
- for construction.
328
- """
329
-
330
- build_time_dependency = False
331
- """
332
- Set to True if it's known to be only a build-time dependency (i.e.
333
- not needed after installation).
334
- """
335
-
336
- requested = False
337
- """A boolean that indicates whether the ``REQUESTED`` metadata file is
338
- present (in other words, whether the package was installed by user
339
- request or it was installed as a dependency)."""
340
-
341
- def __init__(self, metadata):
342
- """
343
- Initialise an instance.
344
- :param metadata: The instance of :class:`Metadata` describing this
345
- distribution.
346
- """
347
- self.metadata = metadata
348
- self.name = metadata.name
349
- self.key = self.name.lower() # for case-insensitive comparisons
350
- self.version = metadata.version
351
- self.locator = None
352
- self.digest = None
353
- self.extras = None # additional features requested
354
- self.context = None # environment marker overrides
355
- self.download_urls = set()
356
- self.digests = {}
357
-
358
- @property
359
- def source_url(self):
360
- """
361
- The source archive download URL for this distribution.
362
- """
363
- return self.metadata.source_url
364
-
365
- download_url = source_url # Backward compatibility
366
-
367
- @property
368
- def name_and_version(self):
369
- """
370
- A utility property which displays the name and version in parentheses.
371
- """
372
- return '%s (%s)' % (self.name, self.version)
373
-
374
- @property
375
- def provides(self):
376
- """
377
- A set of distribution names and versions provided by this distribution.
378
- :return: A set of "name (version)" strings.
379
- """
380
- plist = self.metadata.provides
381
- s = '%s (%s)' % (self.name, self.version)
382
- if s not in plist:
383
- plist.append(s)
384
- return plist
385
-
386
- def _get_requirements(self, req_attr):
387
- md = self.metadata
388
- reqts = getattr(md, req_attr)
389
- logger.debug('%s: got requirements %r from metadata: %r', self.name, req_attr,
390
- reqts)
391
- return set(md.get_requirements(reqts, extras=self.extras,
392
- env=self.context))
393
-
394
- @property
395
- def run_requires(self):
396
- return self._get_requirements('run_requires')
397
-
398
- @property
399
- def meta_requires(self):
400
- return self._get_requirements('meta_requires')
401
-
402
- @property
403
- def build_requires(self):
404
- return self._get_requirements('build_requires')
405
-
406
- @property
407
- def test_requires(self):
408
- return self._get_requirements('test_requires')
409
-
410
- @property
411
- def dev_requires(self):
412
- return self._get_requirements('dev_requires')
413
-
414
- def matches_requirement(self, req):
415
- """
416
- Say if this instance matches (fulfills) a requirement.
417
- :param req: The requirement to match.
418
- :rtype req: str
419
- :return: True if it matches, else False.
420
- """
421
- # Requirement may contain extras - parse to lose those
422
- # from what's passed to the matcher
423
- r = parse_requirement(req)
424
- scheme = get_scheme(self.metadata.scheme)
425
- try:
426
- matcher = scheme.matcher(r.requirement)
427
- except UnsupportedVersionError:
428
- # XXX compat-mode if cannot read the version
429
- logger.warning('could not read version %r - using name only',
430
- req)
431
- name = req.split()[0]
432
- matcher = scheme.matcher(name)
433
-
434
- name = matcher.key # case-insensitive
435
-
436
- result = False
437
- for p in self.provides:
438
- p_name, p_ver = parse_name_and_version(p)
439
- if p_name != name:
440
- continue
441
- try:
442
- result = matcher.match(p_ver)
443
- break
444
- except UnsupportedVersionError:
445
- pass
446
- return result
447
-
448
- def __repr__(self):
449
- """
450
- Return a textual representation of this instance,
451
- """
452
- if self.source_url:
453
- suffix = ' [%s]' % self.source_url
454
- else:
455
- suffix = ''
456
- return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
457
-
458
- def __eq__(self, other):
459
- """
460
- See if this distribution is the same as another.
461
- :param other: The distribution to compare with. To be equal to one
462
- another. distributions must have the same type, name,
463
- version and source_url.
464
- :return: True if it is the same, else False.
465
- """
466
- if type(other) is not type(self):
467
- result = False
468
- else:
469
- result = (self.name == other.name and
470
- self.version == other.version and
471
- self.source_url == other.source_url)
472
- return result
473
-
474
- def __hash__(self):
475
- """
476
- Compute hash in a way which matches the equality test.
477
- """
478
- return hash(self.name) + hash(self.version) + hash(self.source_url)
479
-
480
-
481
- class BaseInstalledDistribution(Distribution):
482
- """
483
- This is the base class for installed distributions (whether PEP 376 or
484
- legacy).
485
- """
486
-
487
- hasher = None
488
-
489
- def __init__(self, metadata, path, env=None):
490
- """
491
- Initialise an instance.
492
- :param metadata: An instance of :class:`Metadata` which describes the
493
- distribution. This will normally have been initialised
494
- from a metadata file in the ``path``.
495
- :param path: The path of the ``.dist-info`` or ``.egg-info``
496
- directory for the distribution.
497
- :param env: This is normally the :class:`DistributionPath`
498
- instance where this distribution was found.
499
- """
500
- super(BaseInstalledDistribution, self).__init__(metadata)
501
- self.path = path
502
- self.dist_path = env
503
-
504
- def get_hash(self, data, hasher=None):
505
- """
506
- Get the hash of some data, using a particular hash algorithm, if
507
- specified.
508
-
509
- :param data: The data to be hashed.
510
- :type data: bytes
511
- :param hasher: The name of a hash implementation, supported by hashlib,
512
- or ``None``. Examples of valid values are ``'sha1'``,
513
- ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
514
- ``'sha512'``. If no hasher is specified, the ``hasher``
515
- attribute of the :class:`InstalledDistribution` instance
516
- is used. If the hasher is determined to be ``None``, MD5
517
- is used as the hashing algorithm.
518
- :returns: The hash of the data. If a hasher was explicitly specified,
519
- the returned hash will be prefixed with the specified hasher
520
- followed by '='.
521
- :rtype: str
522
- """
523
- if hasher is None:
524
- hasher = self.hasher
525
- if hasher is None:
526
- hasher = hashlib.md5
527
- prefix = ''
528
- else:
529
- hasher = getattr(hashlib, hasher)
530
- prefix = '%s=' % self.hasher
531
- digest = hasher(data).digest()
532
- digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
533
- return '%s%s' % (prefix, digest)
534
-
535
-
536
- class InstalledDistribution(BaseInstalledDistribution):
537
- """
538
- Created with the *path* of the ``.dist-info`` directory provided to the
539
- constructor. It reads the metadata contained in ``pydist.json`` when it is
540
- instantiated., or uses a passed in Metadata instance (useful for when
541
- dry-run mode is being used).
542
- """
543
-
544
- hasher = 'sha256'
545
-
546
- def __init__(self, path, metadata=None, env=None):
547
- self.modules = []
548
- self.finder = finder = resources.finder_for_path(path)
549
- if finder is None:
550
- raise ValueError('finder unavailable for %s' % path)
551
- if env and env._cache_enabled and path in env._cache.path:
552
- metadata = env._cache.path[path].metadata
553
- elif metadata is None:
554
- r = finder.find(METADATA_FILENAME)
555
- # Temporary - for Wheel 0.23 support
556
- if r is None:
557
- r = finder.find(WHEEL_METADATA_FILENAME)
558
- # Temporary - for legacy support
559
- if r is None:
560
- r = finder.find(LEGACY_METADATA_FILENAME)
561
- if r is None:
562
- raise ValueError('no %s found in %s' % (METADATA_FILENAME,
563
- path))
564
- with contextlib.closing(r.as_stream()) as stream:
565
- metadata = Metadata(fileobj=stream, scheme='legacy')
566
-
567
- super(InstalledDistribution, self).__init__(metadata, path, env)
568
-
569
- if env and env._cache_enabled:
570
- env._cache.add(self)
571
-
572
- r = finder.find('REQUESTED')
573
- self.requested = r is not None
574
- p = os.path.join(path, 'top_level.txt')
575
- if os.path.exists(p):
576
- with open(p, 'rb') as f:
577
- data = f.read().decode('utf-8')
578
- self.modules = data.splitlines()
579
-
580
- def __repr__(self):
581
- return '<InstalledDistribution %r %s at %r>' % (
582
- self.name, self.version, self.path)
583
-
584
- def __str__(self):
585
- return "%s %s" % (self.name, self.version)
586
-
587
- def _get_records(self):
588
- """
589
- Get the list of installed files for the distribution
590
- :return: A list of tuples of path, hash and size. Note that hash and
591
- size might be ``None`` for some entries. The path is exactly
592
- as stored in the file (which is as in PEP 376).
593
- """
594
- results = []
595
- r = self.get_distinfo_resource('RECORD')
596
- with contextlib.closing(r.as_stream()) as stream:
597
- with CSVReader(stream=stream) as record_reader:
598
- # Base location is parent dir of .dist-info dir
599
- #base_location = os.path.dirname(self.path)
600
- #base_location = os.path.abspath(base_location)
601
- for row in record_reader:
602
- missing = [None for i in range(len(row), 3)]
603
- path, checksum, size = row + missing
604
- #if not os.path.isabs(path):
605
- # path = path.replace('/', os.sep)
606
- # path = os.path.join(base_location, path)
607
- results.append((path, checksum, size))
608
- return results
609
-
610
- @cached_property
611
- def exports(self):
612
- """
613
- Return the information exported by this distribution.
614
- :return: A dictionary of exports, mapping an export category to a dict
615
- of :class:`ExportEntry` instances describing the individual
616
- export entries, and keyed by name.
617
- """
618
- result = {}
619
- r = self.get_distinfo_resource(EXPORTS_FILENAME)
620
- if r:
621
- result = self.read_exports()
622
- return result
623
-
624
- def read_exports(self):
625
- """
626
- Read exports data from a file in .ini format.
627
-
628
- :return: A dictionary of exports, mapping an export category to a list
629
- of :class:`ExportEntry` instances describing the individual
630
- export entries.
631
- """
632
- result = {}
633
- r = self.get_distinfo_resource(EXPORTS_FILENAME)
634
- if r:
635
- with contextlib.closing(r.as_stream()) as stream:
636
- result = read_exports(stream)
637
- return result
638
-
639
- def write_exports(self, exports):
640
- """
641
- Write a dictionary of exports to a file in .ini format.
642
- :param exports: A dictionary of exports, mapping an export category to
643
- a list of :class:`ExportEntry` instances describing the
644
- individual export entries.
645
- """
646
- rf = self.get_distinfo_file(EXPORTS_FILENAME)
647
- with open(rf, 'w') as f:
648
- write_exports(exports, f)
649
-
650
- def get_resource_path(self, relative_path):
651
- """
652
- NOTE: This API may change in the future.
653
-
654
- Return the absolute path to a resource file with the given relative
655
- path.
656
-
657
- :param relative_path: The path, relative to .dist-info, of the resource
658
- of interest.
659
- :return: The absolute path where the resource is to be found.
660
- """
661
- r = self.get_distinfo_resource('RESOURCES')
662
- with contextlib.closing(r.as_stream()) as stream:
663
- with CSVReader(stream=stream) as resources_reader:
664
- for relative, destination in resources_reader:
665
- if relative == relative_path:
666
- return destination
667
- raise KeyError('no resource file with relative path %r '
668
- 'is installed' % relative_path)
669
-
670
- def list_installed_files(self):
671
- """
672
- Iterates over the ``RECORD`` entries and returns a tuple
673
- ``(path, hash, size)`` for each line.
674
-
675
- :returns: iterator of (path, hash, size)
676
- """
677
- for result in self._get_records():
678
- yield result
679
-
680
- def write_installed_files(self, paths, prefix, dry_run=False):
681
- """
682
- Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
683
- existing ``RECORD`` file is silently overwritten.
684
-
685
- prefix is used to determine when to write absolute paths.
686
- """
687
- prefix = os.path.join(prefix, '')
688
- base = os.path.dirname(self.path)
689
- base_under_prefix = base.startswith(prefix)
690
- base = os.path.join(base, '')
691
- record_path = self.get_distinfo_file('RECORD')
692
- logger.info('creating %s', record_path)
693
- if dry_run:
694
- return None
695
- with CSVWriter(record_path) as writer:
696
- for path in paths:
697
- if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
698
- # do not put size and hash, as in PEP-376
699
- hash_value = size = ''
700
- else:
701
- size = '%d' % os.path.getsize(path)
702
- with open(path, 'rb') as fp:
703
- hash_value = self.get_hash(fp.read())
704
- if path.startswith(base) or (base_under_prefix and
705
- path.startswith(prefix)):
706
- path = os.path.relpath(path, base)
707
- writer.writerow((path, hash_value, size))
708
-
709
- # add the RECORD file itself
710
- if record_path.startswith(base):
711
- record_path = os.path.relpath(record_path, base)
712
- writer.writerow((record_path, '', ''))
713
- return record_path
714
-
715
- def check_installed_files(self):
716
- """
717
- Checks that the hashes and sizes of the files in ``RECORD`` are
718
- matched by the files themselves. Returns a (possibly empty) list of
719
- mismatches. Each entry in the mismatch list will be a tuple consisting
720
- of the path, 'exists', 'size' or 'hash' according to what didn't match
721
- (existence is checked first, then size, then hash), the expected
722
- value and the actual value.
723
- """
724
- mismatches = []
725
- base = os.path.dirname(self.path)
726
- record_path = self.get_distinfo_file('RECORD')
727
- for path, hash_value, size in self.list_installed_files():
728
- if not os.path.isabs(path):
729
- path = os.path.join(base, path)
730
- if path == record_path:
731
- continue
732
- if not os.path.exists(path):
733
- mismatches.append((path, 'exists', True, False))
734
- elif os.path.isfile(path):
735
- actual_size = str(os.path.getsize(path))
736
- if size and actual_size != size:
737
- mismatches.append((path, 'size', size, actual_size))
738
- elif hash_value:
739
- if '=' in hash_value:
740
- hasher = hash_value.split('=', 1)[0]
741
- else:
742
- hasher = None
743
-
744
- with open(path, 'rb') as f:
745
- actual_hash = self.get_hash(f.read(), hasher)
746
- if actual_hash != hash_value:
747
- mismatches.append((path, 'hash', hash_value, actual_hash))
748
- return mismatches
749
-
750
- @cached_property
751
- def shared_locations(self):
752
- """
753
- A dictionary of shared locations whose keys are in the set 'prefix',
754
- 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
755
- The corresponding value is the absolute path of that category for
756
- this distribution, and takes into account any paths selected by the
757
- user at installation time (e.g. via command-line arguments). In the
758
- case of the 'namespace' key, this would be a list of absolute paths
759
- for the roots of namespace packages in this distribution.
760
-
761
- The first time this property is accessed, the relevant information is
762
- read from the SHARED file in the .dist-info directory.
763
- """
764
- result = {}
765
- shared_path = os.path.join(self.path, 'SHARED')
766
- if os.path.isfile(shared_path):
767
- with codecs.open(shared_path, 'r', encoding='utf-8') as f:
768
- lines = f.read().splitlines()
769
- for line in lines:
770
- key, value = line.split('=', 1)
771
- if key == 'namespace':
772
- result.setdefault(key, []).append(value)
773
- else:
774
- result[key] = value
775
- return result
776
-
777
- def write_shared_locations(self, paths, dry_run=False):
778
- """
779
- Write shared location information to the SHARED file in .dist-info.
780
- :param paths: A dictionary as described in the documentation for
781
- :meth:`shared_locations`.
782
- :param dry_run: If True, the action is logged but no file is actually
783
- written.
784
- :return: The path of the file written to.
785
- """
786
- shared_path = os.path.join(self.path, 'SHARED')
787
- logger.info('creating %s', shared_path)
788
- if dry_run:
789
- return None
790
- lines = []
791
- for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
792
- path = paths[key]
793
- if os.path.isdir(paths[key]):
794
- lines.append('%s=%s' % (key, path))
795
- for ns in paths.get('namespace', ()):
796
- lines.append('namespace=%s' % ns)
797
-
798
- with codecs.open(shared_path, 'w', encoding='utf-8') as f:
799
- f.write('\n'.join(lines))
800
- return shared_path
801
-
802
- def get_distinfo_resource(self, path):
803
- if path not in DIST_FILES:
804
- raise DistlibException('invalid path for a dist-info file: '
805
- '%r at %r' % (path, self.path))
806
- finder = resources.finder_for_path(self.path)
807
- if finder is None:
808
- raise DistlibException('Unable to get a finder for %s' % self.path)
809
- return finder.find(path)
810
-
811
- def get_distinfo_file(self, path):
812
- """
813
- Returns a path located under the ``.dist-info`` directory. Returns a
814
- string representing the path.
815
-
816
- :parameter path: a ``'/'``-separated path relative to the
817
- ``.dist-info`` directory or an absolute path;
818
- If *path* is an absolute path and doesn't start
819
- with the ``.dist-info`` directory path,
820
- a :class:`DistlibException` is raised
821
- :type path: str
822
- :rtype: str
823
- """
824
- # Check if it is an absolute path # XXX use relpath, add tests
825
- if path.find(os.sep) >= 0:
826
- # it's an absolute path?
827
- distinfo_dirname, path = path.split(os.sep)[-2:]
828
- if distinfo_dirname != self.path.split(os.sep)[-1]:
829
- raise DistlibException(
830
- 'dist-info file %r does not belong to the %r %s '
831
- 'distribution' % (path, self.name, self.version))
832
-
833
- # The file must be relative
834
- if path not in DIST_FILES:
835
- raise DistlibException('invalid path for a dist-info file: '
836
- '%r at %r' % (path, self.path))
837
-
838
- return os.path.join(self.path, path)
839
-
840
- def list_distinfo_files(self):
841
- """
842
- Iterates over the ``RECORD`` entries and returns paths for each line if
843
- the path is pointing to a file located in the ``.dist-info`` directory
844
- or one of its subdirectories.
845
-
846
- :returns: iterator of paths
847
- """
848
- base = os.path.dirname(self.path)
849
- for path, checksum, size in self._get_records():
850
- # XXX add separator or use real relpath algo
851
- if not os.path.isabs(path):
852
- path = os.path.join(base, path)
853
- if path.startswith(self.path):
854
- yield path
855
-
856
- def __eq__(self, other):
857
- return (isinstance(other, InstalledDistribution) and
858
- self.path == other.path)
859
-
860
- # See http://docs.python.org/reference/datamodel#object.__hash__
861
- __hash__ = object.__hash__
862
-
863
-
864
- class EggInfoDistribution(BaseInstalledDistribution):
865
- """Created with the *path* of the ``.egg-info`` directory or file provided
866
- to the constructor. It reads the metadata contained in the file itself, or
867
- if the given path happens to be a directory, the metadata is read from the
868
- file ``PKG-INFO`` under that directory."""
869
-
870
- requested = True # as we have no way of knowing, assume it was
871
- shared_locations = {}
872
-
873
- def __init__(self, path, env=None):
874
- def set_name_and_version(s, n, v):
875
- s.name = n
876
- s.key = n.lower() # for case-insensitive comparisons
877
- s.version = v
878
-
879
- self.path = path
880
- self.dist_path = env
881
- if env and env._cache_enabled and path in env._cache_egg.path:
882
- metadata = env._cache_egg.path[path].metadata
883
- set_name_and_version(self, metadata.name, metadata.version)
884
- else:
885
- metadata = self._get_metadata(path)
886
-
887
- # Need to be set before caching
888
- set_name_and_version(self, metadata.name, metadata.version)
889
-
890
- if env and env._cache_enabled:
891
- env._cache_egg.add(self)
892
- super(EggInfoDistribution, self).__init__(metadata, path, env)
893
-
894
- def _get_metadata(self, path):
895
- requires = None
896
-
897
- def parse_requires_data(data):
898
- """Create a list of dependencies from a requires.txt file.
899
-
900
- *data*: the contents of a setuptools-produced requires.txt file.
901
- """
902
- reqs = []
903
- lines = data.splitlines()
904
- for line in lines:
905
- line = line.strip()
906
- if line.startswith('['):
907
- logger.warning('Unexpected line: quitting requirement scan: %r',
908
- line)
909
- break
910
- r = parse_requirement(line)
911
- if not r:
912
- logger.warning('Not recognised as a requirement: %r', line)
913
- continue
914
- if r.extras:
915
- logger.warning('extra requirements in requires.txt are '
916
- 'not supported')
917
- if not r.constraints:
918
- reqs.append(r.name)
919
- else:
920
- cons = ', '.join('%s%s' % c for c in r.constraints)
921
- reqs.append('%s (%s)' % (r.name, cons))
922
- return reqs
923
-
924
- def parse_requires_path(req_path):
925
- """Create a list of dependencies from a requires.txt file.
926
-
927
- *req_path*: the path to a setuptools-produced requires.txt file.
928
- """
929
-
930
- reqs = []
931
- try:
932
- with codecs.open(req_path, 'r', 'utf-8') as fp:
933
- reqs = parse_requires_data(fp.read())
934
- except IOError:
935
- pass
936
- return reqs
937
-
938
- tl_path = tl_data = None
939
- if path.endswith('.egg'):
940
- if os.path.isdir(path):
941
- p = os.path.join(path, 'EGG-INFO')
942
- meta_path = os.path.join(p, 'PKG-INFO')
943
- metadata = Metadata(path=meta_path, scheme='legacy')
944
- req_path = os.path.join(p, 'requires.txt')
945
- tl_path = os.path.join(p, 'top_level.txt')
946
- requires = parse_requires_path(req_path)
947
- else:
948
- # FIXME handle the case where zipfile is not available
949
- zipf = zipimport.zipimporter(path)
950
- fileobj = StringIO(
951
- zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
952
- metadata = Metadata(fileobj=fileobj, scheme='legacy')
953
- try:
954
- data = zipf.get_data('EGG-INFO/requires.txt')
955
- tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')
956
- requires = parse_requires_data(data.decode('utf-8'))
957
- except IOError:
958
- requires = None
959
- elif path.endswith('.egg-info'):
960
- if os.path.isdir(path):
961
- req_path = os.path.join(path, 'requires.txt')
962
- requires = parse_requires_path(req_path)
963
- path = os.path.join(path, 'PKG-INFO')
964
- tl_path = os.path.join(path, 'top_level.txt')
965
- metadata = Metadata(path=path, scheme='legacy')
966
- else:
967
- raise DistlibException('path must end with .egg-info or .egg, '
968
- 'got %r' % path)
969
-
970
- if requires:
971
- metadata.add_requirements(requires)
972
- # look for top-level modules in top_level.txt, if present
973
- if tl_data is None:
974
- if tl_path is not None and os.path.exists(tl_path):
975
- with open(tl_path, 'rb') as f:
976
- tl_data = f.read().decode('utf-8')
977
- if not tl_data:
978
- tl_data = []
979
- else:
980
- tl_data = tl_data.splitlines()
981
- self.modules = tl_data
982
- return metadata
983
-
984
- def __repr__(self):
985
- return '<EggInfoDistribution %r %s at %r>' % (
986
- self.name, self.version, self.path)
987
-
988
- def __str__(self):
989
- return "%s %s" % (self.name, self.version)
990
-
991
- def check_installed_files(self):
992
- """
993
- Checks that the hashes and sizes of the files in ``RECORD`` are
994
- matched by the files themselves. Returns a (possibly empty) list of
995
- mismatches. Each entry in the mismatch list will be a tuple consisting
996
- of the path, 'exists', 'size' or 'hash' according to what didn't match
997
- (existence is checked first, then size, then hash), the expected
998
- value and the actual value.
999
- """
1000
- mismatches = []
1001
- record_path = os.path.join(self.path, 'installed-files.txt')
1002
- if os.path.exists(record_path):
1003
- for path, _, _ in self.list_installed_files():
1004
- if path == record_path:
1005
- continue
1006
- if not os.path.exists(path):
1007
- mismatches.append((path, 'exists', True, False))
1008
- return mismatches
1009
-
1010
- def list_installed_files(self):
1011
- """
1012
- Iterates over the ``installed-files.txt`` entries and returns a tuple
1013
- ``(path, hash, size)`` for each line.
1014
-
1015
- :returns: a list of (path, hash, size)
1016
- """
1017
-
1018
- def _md5(path):
1019
- f = open(path, 'rb')
1020
- try:
1021
- content = f.read()
1022
- finally:
1023
- f.close()
1024
- return hashlib.md5(content).hexdigest()
1025
-
1026
- def _size(path):
1027
- return os.stat(path).st_size
1028
-
1029
- record_path = os.path.join(self.path, 'installed-files.txt')
1030
- result = []
1031
- if os.path.exists(record_path):
1032
- with codecs.open(record_path, 'r', encoding='utf-8') as f:
1033
- for line in f:
1034
- line = line.strip()
1035
- p = os.path.normpath(os.path.join(self.path, line))
1036
- # "./" is present as a marker between installed files
1037
- # and installation metadata files
1038
- if not os.path.exists(p):
1039
- logger.warning('Non-existent file: %s', p)
1040
- if p.endswith(('.pyc', '.pyo')):
1041
- continue
1042
- #otherwise fall through and fail
1043
- if not os.path.isdir(p):
1044
- result.append((p, _md5(p), _size(p)))
1045
- result.append((record_path, None, None))
1046
- return result
1047
-
1048
- def list_distinfo_files(self, absolute=False):
1049
- """
1050
- Iterates over the ``installed-files.txt`` entries and returns paths for
1051
- each line if the path is pointing to a file located in the
1052
- ``.egg-info`` directory or one of its subdirectories.
1053
-
1054
- :parameter absolute: If *absolute* is ``True``, each returned path is
1055
- transformed into a local absolute path. Otherwise the
1056
- raw value from ``installed-files.txt`` is returned.
1057
- :type absolute: boolean
1058
- :returns: iterator of paths
1059
- """
1060
- record_path = os.path.join(self.path, 'installed-files.txt')
1061
- if os.path.exists(record_path):
1062
- skip = True
1063
- with codecs.open(record_path, 'r', encoding='utf-8') as f:
1064
- for line in f:
1065
- line = line.strip()
1066
- if line == './':
1067
- skip = False
1068
- continue
1069
- if not skip:
1070
- p = os.path.normpath(os.path.join(self.path, line))
1071
- if p.startswith(self.path):
1072
- if absolute:
1073
- yield p
1074
- else:
1075
- yield line
1076
-
1077
- def __eq__(self, other):
1078
- return (isinstance(other, EggInfoDistribution) and
1079
- self.path == other.path)
1080
-
1081
- # See http://docs.python.org/reference/datamodel#object.__hash__
1082
- __hash__ = object.__hash__
1083
-
1084
- new_dist_class = InstalledDistribution
1085
- old_dist_class = EggInfoDistribution
1086
-
1087
-
1088
- class DependencyGraph(object):
1089
- """
1090
- Represents a dependency graph between distributions.
1091
-
1092
- The dependency relationships are stored in an ``adjacency_list`` that maps
1093
- distributions to a list of ``(other, label)`` tuples where ``other``
1094
- is a distribution and the edge is labeled with ``label`` (i.e. the version
1095
- specifier, if such was provided). Also, for more efficient traversal, for
1096
- every distribution ``x``, a list of predecessors is kept in
1097
- ``reverse_list[x]``. An edge from distribution ``a`` to
1098
- distribution ``b`` means that ``a`` depends on ``b``. If any missing
1099
- dependencies are found, they are stored in ``missing``, which is a
1100
- dictionary that maps distributions to a list of requirements that were not
1101
- provided by any other distributions.
1102
- """
1103
-
1104
- def __init__(self):
1105
- self.adjacency_list = {}
1106
- self.reverse_list = {}
1107
- self.missing = {}
1108
-
1109
- def add_distribution(self, distribution):
1110
- """Add the *distribution* to the graph.
1111
-
1112
- :type distribution: :class:`distutils2.database.InstalledDistribution`
1113
- or :class:`distutils2.database.EggInfoDistribution`
1114
- """
1115
- self.adjacency_list[distribution] = []
1116
- self.reverse_list[distribution] = []
1117
- #self.missing[distribution] = []
1118
-
1119
- def add_edge(self, x, y, label=None):
1120
- """Add an edge from distribution *x* to distribution *y* with the given
1121
- *label*.
1122
-
1123
- :type x: :class:`distutils2.database.InstalledDistribution` or
1124
- :class:`distutils2.database.EggInfoDistribution`
1125
- :type y: :class:`distutils2.database.InstalledDistribution` or
1126
- :class:`distutils2.database.EggInfoDistribution`
1127
- :type label: ``str`` or ``None``
1128
- """
1129
- self.adjacency_list[x].append((y, label))
1130
- # multiple edges are allowed, so be careful
1131
- if x not in self.reverse_list[y]:
1132
- self.reverse_list[y].append(x)
1133
-
1134
- def add_missing(self, distribution, requirement):
1135
- """
1136
- Add a missing *requirement* for the given *distribution*.
1137
-
1138
- :type distribution: :class:`distutils2.database.InstalledDistribution`
1139
- or :class:`distutils2.database.EggInfoDistribution`
1140
- :type requirement: ``str``
1141
- """
1142
- logger.debug('%s missing %r', distribution, requirement)
1143
- self.missing.setdefault(distribution, []).append(requirement)
1144
-
1145
- def _repr_dist(self, dist):
1146
- return '%s %s' % (dist.name, dist.version)
1147
-
1148
- def repr_node(self, dist, level=1):
1149
- """Prints only a subgraph"""
1150
- output = [self._repr_dist(dist)]
1151
- for other, label in self.adjacency_list[dist]:
1152
- dist = self._repr_dist(other)
1153
- if label is not None:
1154
- dist = '%s [%s]' % (dist, label)
1155
- output.append(' ' * level + str(dist))
1156
- suboutput = self.repr_node(other, level + 1)
1157
- subs = suboutput.split('\n')
1158
- output.extend(subs[1:])
1159
- return '\n'.join(output)
1160
-
1161
- def to_dot(self, f, skip_disconnected=True):
1162
- """Writes a DOT output for the graph to the provided file *f*.
1163
-
1164
- If *skip_disconnected* is set to ``True``, then all distributions
1165
- that are not dependent on any other distribution are skipped.
1166
-
1167
- :type f: has to support ``file``-like operations
1168
- :type skip_disconnected: ``bool``
1169
- """
1170
- disconnected = []
1171
-
1172
- f.write("digraph dependencies {\n")
1173
- for dist, adjs in self.adjacency_list.items():
1174
- if len(adjs) == 0 and not skip_disconnected:
1175
- disconnected.append(dist)
1176
- for other, label in adjs:
1177
- if not label is None:
1178
- f.write('"%s" -> "%s" [label="%s"]\n' %
1179
- (dist.name, other.name, label))
1180
- else:
1181
- f.write('"%s" -> "%s"\n' % (dist.name, other.name))
1182
- if not skip_disconnected and len(disconnected) > 0:
1183
- f.write('subgraph disconnected {\n')
1184
- f.write('label = "Disconnected"\n')
1185
- f.write('bgcolor = red\n')
1186
-
1187
- for dist in disconnected:
1188
- f.write('"%s"' % dist.name)
1189
- f.write('\n')
1190
- f.write('}\n')
1191
- f.write('}\n')
1192
-
1193
- def topological_sort(self):
1194
- """
1195
- Perform a topological sort of the graph.
1196
- :return: A tuple, the first element of which is a topologically sorted
1197
- list of distributions, and the second element of which is a
1198
- list of distributions that cannot be sorted because they have
1199
- circular dependencies and so form a cycle.
1200
- """
1201
- result = []
1202
- # Make a shallow copy of the adjacency list
1203
- alist = {}
1204
- for k, v in self.adjacency_list.items():
1205
- alist[k] = v[:]
1206
- while True:
1207
- # See what we can remove in this run
1208
- to_remove = []
1209
- for k, v in list(alist.items())[:]:
1210
- if not v:
1211
- to_remove.append(k)
1212
- del alist[k]
1213
- if not to_remove:
1214
- # What's left in alist (if anything) is a cycle.
1215
- break
1216
- # Remove from the adjacency list of others
1217
- for k, v in alist.items():
1218
- alist[k] = [(d, r) for d, r in v if d not in to_remove]
1219
- logger.debug('Moving to result: %s',
1220
- ['%s (%s)' % (d.name, d.version) for d in to_remove])
1221
- result.extend(to_remove)
1222
- return result, list(alist.keys())
1223
-
1224
- def __repr__(self):
1225
- """Representation of the graph"""
1226
- output = []
1227
- for dist, adjs in self.adjacency_list.items():
1228
- output.append(self.repr_node(dist))
1229
- return '\n'.join(output)
1230
-
1231
-
1232
- def make_graph(dists, scheme='default'):
1233
- """Makes a dependency graph from the given distributions.
1234
-
1235
- :parameter dists: a list of distributions
1236
- :type dists: list of :class:`distutils2.database.InstalledDistribution` and
1237
- :class:`distutils2.database.EggInfoDistribution` instances
1238
- :rtype: a :class:`DependencyGraph` instance
1239
- """
1240
- scheme = get_scheme(scheme)
1241
- graph = DependencyGraph()
1242
- provided = {} # maps names to lists of (version, dist) tuples
1243
-
1244
- # first, build the graph and find out what's provided
1245
- for dist in dists:
1246
- graph.add_distribution(dist)
1247
-
1248
- for p in dist.provides:
1249
- name, version = parse_name_and_version(p)
1250
- logger.debug('Add to provided: %s, %s, %s', name, version, dist)
1251
- provided.setdefault(name, []).append((version, dist))
1252
-
1253
- # now make the edges
1254
- for dist in dists:
1255
- requires = (dist.run_requires | dist.meta_requires |
1256
- dist.build_requires | dist.dev_requires)
1257
- for req in requires:
1258
- try:
1259
- matcher = scheme.matcher(req)
1260
- except UnsupportedVersionError:
1261
- # XXX compat-mode if cannot read the version
1262
- logger.warning('could not read version %r - using name only',
1263
- req)
1264
- name = req.split()[0]
1265
- matcher = scheme.matcher(name)
1266
-
1267
- name = matcher.key # case-insensitive
1268
-
1269
- matched = False
1270
- if name in provided:
1271
- for version, provider in provided[name]:
1272
- try:
1273
- match = matcher.match(version)
1274
- except UnsupportedVersionError:
1275
- match = False
1276
-
1277
- if match:
1278
- graph.add_edge(dist, provider, req)
1279
- matched = True
1280
- break
1281
- if not matched:
1282
- graph.add_missing(dist, req)
1283
- return graph
1284
-
1285
-
1286
- def get_dependent_dists(dists, dist):
1287
- """Recursively generate a list of distributions from *dists* that are
1288
- dependent on *dist*.
1289
-
1290
- :param dists: a list of distributions
1291
- :param dist: a distribution, member of *dists* for which we are interested
1292
- """
1293
- if dist not in dists:
1294
- raise DistlibException('given distribution %r is not a member '
1295
- 'of the list' % dist.name)
1296
- graph = make_graph(dists)
1297
-
1298
- dep = [dist] # dependent distributions
1299
- todo = graph.reverse_list[dist] # list of nodes we should inspect
1300
-
1301
- while todo:
1302
- d = todo.pop()
1303
- dep.append(d)
1304
- for succ in graph.reverse_list[d]:
1305
- if succ not in dep:
1306
- todo.append(succ)
1307
-
1308
- dep.pop(0) # remove dist from dep, was there to prevent infinite loops
1309
- return dep
1310
-
1311
-
1312
- def get_required_dists(dists, dist):
1313
- """Recursively generate a list of distributions from *dists* that are
1314
- required by *dist*.
1315
-
1316
- :param dists: a list of distributions
1317
- :param dist: a distribution, member of *dists* for which we are interested
1318
- in finding the dependencies.
1319
- """
1320
- if dist not in dists:
1321
- raise DistlibException('given distribution %r is not a member '
1322
- 'of the list' % dist.name)
1323
- graph = make_graph(dists)
1324
-
1325
- req = set() # required distributions
1326
- todo = graph.adjacency_list[dist] # list of nodes we should inspect
1327
- seen = set(t[0] for t in todo) # already added to todo
1328
-
1329
- while todo:
1330
- d = todo.pop()[0]
1331
- req.add(d)
1332
- pred_list = graph.adjacency_list[d]
1333
- for pred in pred_list:
1334
- d = pred[0]
1335
- if d not in req and d not in seen:
1336
- seen.add(d)
1337
- todo.append(pred)
1338
- return req
1339
-
1340
-
1341
- def make_dist(name, version, **kwargs):
1342
- """
1343
- A convenience method for making a dist given just a name and version.
1344
- """
1345
- summary = kwargs.pop('summary', 'Placeholder for summary')
1346
- md = Metadata(**kwargs)
1347
- md.name = name
1348
- md.version = version
1349
- md.summary = summary or 'Placeholder for summary'
1350
- return Distribution(md)
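
The file deleted above is pip's vendored copy of distlib's `database` module, which models installed distributions and their dependency graph. A minimal usage sketch of the same API, assuming the standalone `distlib` package is installed (the sketch is illustrative and not part of the diff):

    from distlib.database import DistributionPath, make_graph

    # Scan sys.path for PEP 376 (.dist-info) and legacy (.egg/.egg-info) installs
    dist_path = DistributionPath(include_egg=True)
    dists = list(dist_path.get_distributions())

    # Build the dependency graph and report requirements nothing provides
    graph = make_graph(dists)
    for dist, missing in graph.missing.items():
        print('%s lacks: %s' % (dist.name_and_version, ', '.join(missing)))
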
spaces/Billius/runwayml-stable-diffusion-v1-5-04-07-2023/app.py DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch()
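
For context on the deleted app: `gr.Interface.load` was deprecated in Gradio 4.x. A one-line equivalent on current Gradio, assuming the model repo is still hosted on the Hub, might look like:

    import gradio as gr

    gr.load("models/runwayml/stable-diffusion-v1-5").launch()
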
spaces/Blessin/impro-scene-generator/app.py DELETED
@@ -1,69 +0,0 @@
-import gradio as gr
-import openai
-
-def generate_script(api_key, name1, name2, situation):
-    # Initialize OpenAI API with the provided key
-    openai.api_key = api_key
-
-    # Define the example script to set the context
-    example_script = (
-        "Setting: A cafe\n\n"
-        "Scene:\n\n"
-        "(Lights come up on a cozy little coffee shop. LOUIS and GIRL are sitting at a small table, with two untouched coffees between them. "
-        "It's clear from their expressions that the awkwardness from the previous scene hasn't worn off.)\n\n"
-        "LOUIS. (Attempting to break the ice) So, um... do you come to this coffee shop often?\n"
-        "GIRL. (Coldly) No, not really.\n"
-        "LOUIS. Oh. Well, they have the best almond croissants here. You should try one, sometime.\n"
-        "GIRL. (Slightly warming up) I'm more of a bagel person, actually.\n"
-        "LOUIS. Really? Me too! What's your favorite kind?\n"
-        "GIRL. Plain. With cream cheese.\n"
-        "LOUIS. Nice choice. I like mine with a bit of jam.\n"
-        "(A brief pause as they both sip their coffee, seemingly more at ease.)\n\n"
-        "LOUIS. (Hesitant) Look, I'm sorry about earlier... I don't know why I was acting like such a jerk.\n"
-        "GIRL. (Softening) It's alright. People can act strange when they meet someone new.\n"
-        "LOUIS. Yeah. It's just... I've been feeling kind of... out of place lately. And I guess I was trying too hard to impress you.\n"
-        "GIRL. You don't have to impress me, Louis. Just be yourself.\n"
-        "(LOUIS smiles shyly and looks down at his coffee. The GIRL does too, and for a moment, there's a comfortable silence between them.)\n\n"
-        "LOUIS. So... do you have any plans for the weekend?\n"
-        "GIRL. (Smiling) Not much. Just taking my dog to the park. Maybe catch a movie later. How about you?\n"
-        "LOUIS. Same here. Minus the dog, though.\n"
-        "GIRL. (Laughs) Well, maybe you can join me. And who knows, you might just enjoy the simple joy of a walk in the park.\n"
-        "LOUIS. (Smiling) I'd like that.\n\n"
-        "(As they continue chatting, the lights dim, suggesting the budding of a new connection between LOUIS and GIRL.)\n\n"
-        "(Blackout)\n"
-    )
-
-    # Define the prompt to transform the user's inputs into a stage play script
-    prompt_text = (
-        f"{example_script}\n\n"
-        f"Generate a new stage play script based on the following details:\n"
-        f"Location: Cafe\n"
-        f"Character 1: {name1}\n"
-        f"Character 2: {name2}\n"
-        f"Situation: {situation}\n"
-    )
-
-    # Use OpenAI's Completion endpoint to generate the stage play script
-    response = openai.Completion.create(engine="davinci", prompt=prompt_text, max_tokens=500)
-    script = response.choices[0].text.strip()
-
-    return script
-
-# Define Gradio Interface
-iface = gr.Interface(
-    fn=generate_script,
-    inputs=[
-        gr.components.Textbox(label="OpenAI API Key", type="password"),
-        gr.components.Textbox(label="Name 1", type="text"),
-        gr.components.Textbox(label="Name 2", type="text"),
-        gr.components.Textbox(label="Situation", type="text"),
-    ],
-    outputs=gr.components.Textbox(label="Generated Stage Play Script", type="text"),
-    live=True,
-    title="Stage Play Script Generator",
-    description="Generate a stage play script set in a cafe based on given characters and situation using OpenAI!",
-    progress="Generating stage play script...",
-)
-
-# Launch the Gradio app
-iface.launch()
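
The deleted script above targets the legacy `openai` 0.x SDK: `openai.Completion.create(engine=...)` and the bare `davinci` engine were removed in `openai>=1.0`. A hedged sketch of the same call on the 1.x client, where the model name is an assumption since the original engine is retired:

    from openai import OpenAI

    client = OpenAI(api_key=api_key)  # api_key as collected by the Gradio form
    response = client.completions.create(
        model="gpt-3.5-turbo-instruct",  # assumed stand-in for the retired davinci engine
        prompt=prompt_text,
        max_tokens=500,
    )
    script = response.choices[0].text.strip()
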
spaces/CVH-vn1210/make_hair/README.md DELETED
@@ -1,14 +0,0 @@
----
-title: MiniGPT-4
-emoji: 🚀
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: other
-duplicated_from: Vision-CAIR/minigpt4
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVH-vn1210/make_hair/minigpt4/datasets/datasets/cc_combine_dataset.py DELETED
@@ -1,53 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-import os
-from PIL import Image
-import webdataset as wds
-from minigpt4.datasets.datasets.base_dataset import BaseDataset
-from minigpt4.datasets.datasets.caption_datasets import CaptionDataset
-
-
-class CCCombineDataset(BaseDataset):
-    def __init__(self, vis_processor, text_processor, location):
-        super().__init__(vis_processor=vis_processor, text_processor=text_processor)
-
-        self.inner_dataset = wds.DataPipeline(
-            wds.ResampledShards(location),
-            wds.tarfile_to_samples(handler=wds.warn_and_continue),
-            wds.shuffle(1000, handler=wds.warn_and_continue),
-            wds.decode("pilrgb", handler=wds.warn_and_continue),
-            wds.to_tuple("jpg", "json", handler=wds.warn_and_continue),
-            wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),
-            wds.map(self.to_dict, handler=wds.warn_and_continue),
-        )
-
-    def to_dict(self, sample):
-        return {
-            "image": sample[0],
-            "text_input": self.text_processor(sample[1]["caption"]),
-        }
-
-
-class CCAlignDataset(CaptionDataset):
-
-    def __getitem__(self, index):
-
-        # TODO this assumes image input, not general enough
-        ann = self.annotation[index]
-
-        img_file = '{}.jpg'.format(ann["image_id"])
-        image_path = os.path.join(self.vis_root, img_file)
-        image = Image.open(image_path).convert("RGB")
-
-        image = self.vis_processor(image)
-        caption = ann["caption"]
-
-        return {
-            "image": image,
-            "text_input": caption,
-            "image_id": self.img_ids[ann["image_id"]],
-        }
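
`CCCombineDataset.inner_dataset` above is a streaming `webdataset` pipeline: samples are pulled lazily out of tar shards rather than indexed, and `ResampledShards` resamples indefinitely. A minimal iteration sketch with placeholder processors and a hypothetical shard pattern (neither comes from the repo):

    dataset = CCCombineDataset(
        vis_processor=lambda img: img,          # stand-in for the real image transform
        text_processor=lambda txt: txt,         # stand-in for the real caption cleaner
        location="data/cc/{00000..00099}.tar",  # hypothetical shard pattern
    )
    for sample in dataset.inner_dataset:
        image, caption = sample["image"], sample["text_input"]
        break  # resampled shards stream forever, so stop explicitly
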
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/ISSUE_TEMPLATE.md DELETED
@@ -1,5 +0,0 @@
-
-Please select an issue template from
-https://github.com/facebookresearch/detectron2/issues/new/choose .
-
-Otherwise your issue will be closed.
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TridentNet/tridentnet/trident_rcnn.py DELETED
@@ -1,110 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from detectron2.layers import batched_nms
-from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
-from detectron2.modeling.roi_heads.roi_heads import Res5ROIHeads
-from detectron2.structures import Instances
-
-
-def merge_branch_instances(instances, num_branch, nms_thrsh, topk_per_image):
-    """
-    Merge detection results from different branches of TridentNet.
-    Return detection results by applying non-maximum suppression (NMS) on bounding boxes
-    and keep the unsuppressed boxes and other instances (e.g mask) if any.
-
-    Args:
-        instances (list[Instances]): A list of N * num_branch instances that store detection
-            results. Contain N images and each image has num_branch instances.
-        num_branch (int): Number of branches used for merging detection results for each image.
-        nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
-        topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
-            all detections.
-
-    Returns:
-        results: (list[Instances]): A list of N instances, one for each image in the batch,
-            that stores the topk most confidence detections after merging results from multiple
-            branches.
-    """
-    if num_branch == 1:
-        return instances
-
-    batch_size = len(instances) // num_branch
-    results = []
-    for i in range(batch_size):
-        instance = Instances.cat([instances[i + batch_size * j] for j in range(num_branch)])
-
-        # Apply per-class NMS
-        keep = batched_nms(
-            instance.pred_boxes.tensor, instance.scores, instance.pred_classes, nms_thrsh
-        )
-        keep = keep[:topk_per_image]
-        result = instance[keep]
-
-        results.append(result)
-
-    return results
-
-
-@ROI_HEADS_REGISTRY.register()
-class TridentRes5ROIHeads(Res5ROIHeads):
-    """
-    The TridentNet ROIHeads in a typical "C4" R-CNN model.
-    See :class:`Res5ROIHeads`.
-    """
-
-    def __init__(self, cfg, input_shape):
-        super().__init__(cfg, input_shape)
-
-        self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
-        self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1
-
-    def forward(self, images, features, proposals, targets=None):
-        """
-        See :class:`Res5ROIHeads.forward`.
-        """
-        num_branch = self.num_branch if self.training or not self.trident_fast else 1
-        all_targets = targets * num_branch if targets is not None else None
-        pred_instances, losses = super().forward(images, features, proposals, all_targets)
-        del images, all_targets, targets
-
-        if self.training:
-            return pred_instances, losses
-        else:
-            pred_instances = merge_branch_instances(
-                pred_instances, num_branch, self.test_nms_thresh, self.test_detections_per_img
-            )
-
-            return pred_instances, {}
-
-
-@ROI_HEADS_REGISTRY.register()
-class TridentStandardROIHeads(StandardROIHeads):
-    """
-    The `StandardROIHeads` for TridentNet.
-    See :class:`StandardROIHeads`.
-    """
-
-    def __init__(self, cfg, input_shape):
-        super(TridentStandardROIHeads, self).__init__(cfg, input_shape)
-
-        self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
-        self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1
-
-    def forward(self, images, features, proposals, targets=None):
-        """
-        See :class:`Res5ROIHeads.forward`.
-        """
-        # Use 1 branch if using trident_fast during inference.
-        num_branch = self.num_branch if self.training or not self.trident_fast else 1
-        # Duplicate targets for all branches in TridentNet.
-        all_targets = targets * num_branch if targets is not None else None
-        pred_instances, losses = super().forward(images, features, proposals, all_targets)
-        del images, all_targets, targets
-
-        if self.training:
-            return pred_instances, losses
-        else:
-            pred_instances = merge_branch_instances(
-                pred_instances, num_branch, self.test_nms_thresh, self.test_detections_per_img
-            )
-
-            return pred_instances, {}
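
In `merge_branch_instances` above, the `N * num_branch` predictions arrive branch-major, so image `i`'s candidates sit at indices `i, i + N, i + 2N, ...` before they are concatenated and run through per-class NMS. A framework-free sketch of just that regrouping step, with `regroup` as a hypothetical helper:

    def regroup(instances, num_branch):
        # Mirrors the instances[i + batch_size * j] indexing used above
        batch_size = len(instances) // num_branch
        return [[instances[i + batch_size * j] for j in range(num_branch)]
                for i in range(batch_size)]

    # 2 images x 3 branches, laid out branch-major:
    assert regroup(list("abcdef"), 3) == [["a", "c", "e"], ["b", "d", "f"]]
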