parquet-converter committed on
Commit b62bc84 · 1 parent: b3a2c6f

Update parquet files (step 36 of 296)

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/mail.py +0 -80
  2. spaces/123Kumar/vits-uma-genshin-honkai123/README.md +0 -11
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen The Ultimate Guide to ACDSee Photo Editing Software.md +0 -172
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Command. .Conquer.Red.Alert2.Yuris.Revenge.REUP fitgirl repack Tips and tricks for mastering the game and defeating Yuri.md +0 -17
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Crack WinZip 24.0 What You Need to Know Before You Click.md +0 -26
  6. spaces/1gistliPinn/ChatGPT4/Examples/BIOMUTANT Password.md +0 -13
  7. spaces/1gistliPinn/ChatGPT4/Examples/Download Warcraft 3 Full _HOT_ Ko Can Cai Datl.md +0 -8
  8. spaces/1line/AutoGPT/tests/browse_tests.py +0 -26
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/3D Driving Class Game A Fun and Educational Driving Game for All Ages.md +0 -147
  10. spaces/1phancelerku/anime-remove-background/Enjoy Mortal Kombat 4 APK on Your Android Phone or Tablet.md +0 -102
  11. spaces/4Taps/SadTalker/README.md +0 -15
  12. spaces/801artistry/RVC801/demucs/audio.py +0 -172
  13. spaces/801artistry/RVC801/tools/torchgate/__init__.py +0 -12
  14. spaces/AIFILMS/StyleGANEX/datasets/inference_dataset.py +0 -22
  15. spaces/AIGC-Audio/Make_An_Audio/ldm/models/autoencoder.py +0 -474
  16. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/CLAP/clap.py +0 -89
  17. spaces/AIWaves/Debate/src/agents/evolve.py +0 -17
  18. spaces/Abhilashvj/planogram-compliance/utils.py +0 -61
  19. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/hteyun.py +0 -34
  20. spaces/Aer0xander/sd-to-diffusers/README.md +0 -14
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/Factory.d.ts +0 -5
  22. spaces/AlgoveraAI/dcgan-crypto-punks/README.md +0 -38
  23. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/base.py +0 -56
  24. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/eval_ijbc.py +0 -483
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_if.py +0 -1257
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +0 -486
  27. spaces/Andy1621/uniformer_image_detection/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py +0 -92
  28. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docker/Dockerfile +0 -75
  29. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/errors.py +0 -31
  30. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_g.py +0 -38
  31. spaces/Anonymous-sub/Rerender/ControlNet/gradio_scribble2image.py +0 -92
  32. spaces/Ariharasudhan/YoloV5/utils/plots.py +0 -575
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/namespaces.py +0 -107
  34. spaces/Audio-AGI/AudioSep/models/CLAP/training/distributed.py +0 -150
  35. spaces/Bart92/RVC_HF/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +0 -90
  36. spaces/Benson/text-generation/Examples/Cmo Descargar Gratis Fuego Mx En El Ordenador Porttil Sin Bluestacks.md +0 -39
  37. spaces/Benson/text-generation/Examples/Descargar Bluecurve Home App.md +0 -76
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py +0 -23
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/common.py +0 -424
  40. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/reduce_by_key.h +0 -23
  41. spaces/CVPR/WALT/mmdet/datasets/coco.py +0 -548
  42. spaces/CVPR/regionclip-demo/detectron2/engine/launch.py +0 -125
  43. spaces/CikeyQI/Yunzai/Yunzai/lib/tools/command.js +0 -118
  44. spaces/CikeyQI/meme-api/meme_generator/memes/douyin/__init__.py +0 -78
  45. spaces/Cletrason/Cletrason-toad-mario-movie/gradio_utils.py +0 -98
  46. spaces/CofAI/chat/g4f/Provider/Providers/DeepAi.py +0 -46
  47. spaces/CuriousDolphin/MobileSAM/README.md +0 -45
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/mtiLib/__init__.py +0 -1402
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/serializing.py +0 -548
  50. spaces/DShrimp/PoseMaker/src/model.py +0 -219
spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/mail.py DELETED
@@ -1,80 +0,0 @@
- from json import loads
- from re import findall
- from time import sleep
-
- from fake_useragent import UserAgent
- from requests import Session
-
-
- class Emailnator:
-     def __init__(self) -> None:
-         self.client = Session()
-         self.client.get("https://www.emailnator.com/", timeout=6)
-         self.cookies = self.client.cookies.get_dict()
-
-         self.client.headers = {
-             "authority": "www.emailnator.com",
-             "origin": "https://www.emailnator.com",
-             "referer": "https://www.emailnator.com/",
-             "user-agent": UserAgent().random,
-             "x-xsrf-token": self.client.cookies.get("XSRF-TOKEN")[:-3] + "=",
-         }
-
-         self.email = None
-
-     def get_mail(self):
-         response = self.client.post(
-             "https://www.emailnator.com/generate-email",
-             json={
-                 "email": [
-                     "domain",
-                     "plusGmail",
-                     "dotGmail",
-                 ]
-             },
-         )
-
-         self.email = loads(response.text)["email"][0]
-         return self.email
-
-     def get_message(self):
-         print("Waiting for message...")
-
-         while True:
-             sleep(2)
-             mail_token = self.client.post("https://www.emailnator.com/message-list", json={"email": self.email})
-
-             mail_token = loads(mail_token.text)["messageData"]
-
-             if len(mail_token) == 2:
-                 print("Message received!")
-                 print(mail_token[1]["messageID"])
-                 break
-
-         mail_context = self.client.post(
-             "https://www.emailnator.com/message-list",
-             json={
-                 "email": self.email,
-                 "messageID": mail_token[1]["messageID"],
-             },
-         )
-
-         return mail_context.text
-
-     def get_verification_code(self):
-         message = self.get_message()
-         code = findall(r';">(\d{6,7})</div>', message)[0]
-         print(f"Verification code: {code}")
-         return code
-
-     def clear_inbox(self):
-         print("Clearing inbox...")
-         self.client.post(
-             "https://www.emailnator.com/delete-all",
-             json={"email": self.email},
-         )
-         print("Inbox cleared!")
-
-     def __del__(self):
-         if self.email:
-             self.clear_inbox()
spaces/123Kumar/vits-uma-genshin-honkai123/README.md DELETED
@@ -1,11 +0,0 @@
- ---
- license: apache-2.0
- title: ' vits-uma-genshin-honkai'
- sdk: gradio
- sdk_version: 3.7
- emoji: 🐨
- colorTo: yellow
- pinned: false
- app_file: app.py
- duplicated_from: ikechan8370/vits-uma-genshin-honkai
- ---
spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen The Ultimate Guide to ACDSee Photo Editing Software.md DELETED
@@ -1,172 +0,0 @@
1
-
2
- <h1>ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen</h1>
3
- <p>If you are looking for a powerful and versatile photo editing and management software, you might want to check out ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen. This is a torrent file that contains the full version of ACDSee Ultimate v9.0.565 x64, a serial key to activate it, and a keygen to generate more serial keys if needed.</p>
4
- <h2>ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen</h2><br /><p><b><b>Download File</b> &mdash; <a href="https://byltly.com/2uKwj0">https://byltly.com/2uKwj0</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <p>In this article, we will explain what ACDSee Ultimate is, what are its features, why you need a serial key and a keygen for it, how to download and install it, and how to use it for photo editing and management.</p>
7
- <h3>What is ACDSee Ultimate?</h3>
8
- <p>ACDSee Ultimate is a comprehensive photo editing and management software that offers everything you need to work with your digital images. It combines the features of ACDSee Pro, a professional photo editor and organizer, with ACDSee Photo Editor, a creative photo editor and enhancer.</p>
9
- <p>With ACDSee Ultimate, you can import, organize, view, edit, enhance, export, and share your photos in one application. You can also work with RAW files, layers, masks, filters, effects, brushes, and more.</p>
10
- <h3>What are the features of ACDSee Ultimate v9.0.565 x64?</h3>
11
- <p>ACDSee Ultimate v9.0.565 x64 is the latest version of ACDSee Ultimate that was released in 2015. It has many features that make it a powerful and versatile photo editing and management software.</p>
12
- <p>Some of the features of ACDSee Ultimate v9.0.565 x64 are:</p>
13
- <p>ACDSee Ultimate 9 64-bit photo editing software crack<br />
14
- How to activate ACDSee Ultimate v9.0.565 x64 with serial key<br />
15
- ACDSee Ultimate v9.0.565 x64 [deepstatus] full version download<br />
16
- ACDSee Ultimate v9.0.565 x64 license key generator<br />
17
- ACDSee Ultimate 9 for Windows 10 64-bit torrent<br />
18
- ACDSee Ultimate v9.0.565 x64 [deepstatus] patch<br />
19
- ACDSee Ultimate 9 64-bit free trial activation code<br />
20
- ACDSee Ultimate v9.0.565 x64 [deepstatus] keygen download link<br />
21
- ACDSee Ultimate 9 photo manager and editor crack<br />
22
- ACDSee Ultimate v9.0.565 x64 serial number<br />
23
- ACDSee Ultimate 9 64-bit product key<br />
24
- ACDSee Ultimate v9.0.565 x64 [deepstatus] cracked version<br />
25
- ACDSee Ultimate 9 for Windows 7 64-bit torrent<br />
26
- ACDSee Ultimate v9.0.565 x64 activation key<br />
27
- ACDSee Ultimate 9 photo editing software keygen<br />
28
- ACDSee Ultimate v9.0.565 x64 [deepstatus] registration code<br />
29
- ACDSee Ultimate 9 64-bit license key<br />
30
- ACDSee Ultimate v9.0.565 x64 full crack download<br />
31
- ACDSee Ultimate 9 for Windows 8 64-bit torrent<br />
32
- ACDSee Ultimate v9.0.565 x64 serial key generator<br />
33
- ACDSee Ultimate 9 photo manager and editor keygen<br />
34
- ACDSee Ultimate v9.0.565 x64 [deepstatus] crack download link<br />
35
- ACDSee Ultimate 9 64-bit activation code<br />
36
- ACDSee Ultimate v9.0.565 x64 license code<br />
37
- ACDSee Ultimate 9 photo editing software patch<br />
38
- ACDSee Ultimate v9.0.565 x64 [deepstatus] serial key download<br />
39
- ACDSee Ultimate 9 for Windows XP 64-bit torrent<br />
40
- ACDSee Ultimate v9.0.565 x64 crack file download<br />
41
- ACDSee Ultimate 9 photo manager and editor patch<br />
42
- ACDSee Ultimate v9.0.565 x64 [deepstatus] license key download link<br />
43
- ACDSee Ultimate 9 64-bit registration code<br />
44
- ACDSee Ultimate v9.0.565 x64 serial code<br />
45
- ACDSee Ultimate 9 photo editing software license key generator<br />
46
- ACDSee Ultimate v9.0.565 x64 [deepstatus] activation code download<br />
47
- ACDSee Ultimate 9 for Windows Vista 64-bit torrent<br />
48
- ACDSee Ultimate v9.0.565 x64 keygen file download<br />
49
- ACDSee Ultimate 9 photo manager and editor license key generator<br />
50
- ACDSee Ultimate v9.0.565 x64 [deepstatus] registration code download link<br />
51
- ACDSee Ultimate 9 64-bit license code generator<br />
52
- ACDSee Ultimate v9.0.565 x64 product code<br />
53
- ACDSee Ultimate 9 photo editing software serial key generator<br />
54
- ACDSee Ultimate v9.0.565 x64 [deepstatus] product key download<br />
55
- ACDSee Ultimate 9 for Windows Server 2008 R2 torrent</p>
56
- <ul>
57
- <li>It supports 64-bit architecture for faster performance and larger file handling.</li>
58
- <li>It has a new Develop mode that allows you to process RAW files and make non-destructive adjustments.</li>
59
- <li>It has a new Edit mode that lets you apply creative effects and enhancements to your photos.</li>
60
- <li>It has a new Layers mode that enables you to edit your photos with advanced tools and compositing techniques.</li>
61
- <li>It has a new Smart Brush that lets you apply adjustments and effects selectively with a brush.</li>
62
- <li>It has a new Pixel Targeting feature that lets you select and modify specific pixels based on their color or brightness.</li>
63
- <li>It has a new PicaView feature that lets you preview your photos in Windows Explorer without opening them.</li>
64
- <li>It has a new SeeDrive feature that lets you access and manage your photos on cloud storage services like OneDrive or Dropbox.</li>
65
- <li>It has improved support for 4K monitors, touchscreens, high DPI displays, Windows 10, and more.</li>
66
- </ul>
67
- <h3>Why do you need a serial key and a keygen for ACDSee Ultimate v9.0.565 x64?</h3>
68
- <p>ACDSee Ultimate v9.0.565 x64 is not a free software. It costs $149.99 for a single license or $199.99 for a family pack of five licenses.</p>
69
- <p>If you want to use ACDSee Ultimate v9.0.565 x64 without paying for it, you need a serial key and a keygen for it.</p>
70
- <p>A serial key is a unique code that identifies your license of ACDSee Ultimate v9.0.565 x64 and allows you to activate it online or offline.</p>
71
- <p>A keygen is a program that generates random serial keys for ACDSee Ultimate v9.0.565 x64 based on its algorithm.</p>
72
- <p>By using a serial key and a keygen from [deepstatus], you can bypass the activation process of ACDSee Ultimate v9.0.565 x64 and use it for free.</p>
73
- <h2>How to download and install ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen</h2>
74
- <p>If you want to download and install ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen, you need to follow these steps:</p>
75
- <h3>Step 1: Download the torrent file from [deepstatus]</h3>
76
- <p>The first step is to download the torrent file from [deepstatus], which is a trusted source of cracked software torrents.</p>
77
- <p>To download the torrent file from [deepstatus], you need to have a torrent client installed on your computer, such as uTorrent or BitTorrent.</p>
78
- <p>You can find the torrent file of ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen on this link: https://www.deepstatus.net/torrent/ACDSee-Ultimate-v90565-x64-deepstatus-Serial-Key-keygen/</p>
79
- <p>You can also scan this QR code with your smartphone or tablet to access the link:</p>
80
- <img src="https://chart.googleapis.com/chart?chs=150x150&cht=qr&chl=https://www.deepstatus.net/torrent/ACDSee-Ultimate-v90565-x64-deepstatus-Serial-Key-keygen/" alt="QR code">
81
- <p>Once you have downloaded the torrent file from [deepstatus], you need to open it with your torrent client and start downloading the files of ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen.</p>
82
- <h3>Step 2: Extract the files using WinRAR or 7-Zip</h3>
83
- <p>The second step is to extract the files of ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen using WinRAR or 7-Zip.</p>
84
- <p>WinRAR and 7-Zip are programs that can compress and decompress files in various formats, such as ZIP or RAR.</p>
85
- <p>You can download WinRAR from this link: https://www.win-rar.com/download.html</p>
86
- <p>You can download 7-Zip from this link: https://www.7-zip.org/download.html</p>
87
- <h3>Step 3: Run the setup file and follow the instructions</h3>
88
- <p>The third step is to run the setup file and follow the instructions to install ACDSee Ultimate v9.0.565 x64 on your computer.</p>
89
- <p>To run the setup file, you need to double-click on the file named "acdsee-ultimate-9-64bit.exe" in the folder where you extracted the files of ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen.</p>
90
- <p>This will launch the installation wizard of ACDSee Ultimate v9.0.565 x64, which will guide you through the installation process.</p>
91
- <p>You need to accept the license agreement, choose the destination folder, select the components to install, and click on "Install" to start the installation.</p>
92
- <p>The installation may take a few minutes, depending on your computer's speed and performance.</p>
93
- <h3>Step 4: Use the serial key and the keygen to activate the software</h3>
94
- <p>The fourth and final step is to use the serial key and the keygen to activate ACDSee Ultimate v9.0.565 x64 on your computer.</p>
95
- <p>To use the serial key and the keygen, you need to open the folder named "Crack" in the folder where you extracted the files of ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen.</p>
96
- <p>In this folder, you will find two files: "keygen.exe" and "serial.txt".</p>
97
- <p>You need to run the file named "keygen.exe" as administrator by right-clicking on it and choosing "Run as administrator".</p>
98
- <p>This will open a window that shows a random serial key for ACDSee Ultimate v9.0.565 x64.</p>
99
- <p>You need to copy this serial key and paste it in a text editor, such as Notepad or WordPad.</p>
100
- <p>You can also use the serial key that is already provided in the file named "serial.txt" in the same folder.</p>
101
- <p>Once you have copied or noted down a serial key for ACDSee Ultimate v9.0.565 x64, you need to launch the software by double-clicking on its icon on your desktop or in your start menu.</p>
102
- <p>This will open a window that asks you to activate ACDSee Ultimate v9.0.565 x64 online or offline.</p>
103
- <p>You need to choose "Offline Activation" and click on "Next".</p>
104
- <p>This will open another window that shows a request code for ACDSee Ultimate v9.0.565 x64.</p>
105
- <p>You need to copy this request code and paste it in the keygen window where it says "Enter your request code here".</p>
106
- <p>Then, you need to click on "Generate" in the keygen window.</p>
107
- <p>This will generate an activation code for ACDSee Ultimate v9.0.565 x64 based on your request code.</p>
108
- <p>You need to copy this activation code and paste it in the activation window where it says "Enter your activation code here".</p>
109
- <p>Then, you need to click on "Activate" in the activation window.</p>
110
- <p>This will activate ACDSee Ultimate v9.0.565 x64 on your computer and allow you to use it without any limitations.</p>
111
- <h2>How to use ACDSee Ultimate v9.0.565 x64 for photo editing and management</h2>
112
- <p>Now that you have downloaded, installed, and activated ACDSee Ultimate v9.0.565 x64 on your computer, you can start using it for photo editing and management.</p>
113
- <p>In this section, we will show you how to import and organize your photos with ACDSee Ultimate v9.0.565 x64, how to edit your photos with ACDSee Ultimate v9.0.565 x64, and how to export and share your photos with ACDSee Ultimate v9.0.565 x64.</p>
114
- <h3>How to import and organize your photos with ACDSee Ultimate v9.0.565 x64</h3>
115
- <p>The first thing you need to do with ACDSee Ultimate v9.0.565 x64 is to import and organize your photos in its database.</p>
116
- <p>To import and organize your photos with ACDSee Ultimate v9.0.565 x64, you need to follow these steps:</p>
117
- <ol>
118
- <li>Open ACDSee Ultimate v9.0.565 x64 and switch to the Manage mode by clicking on its icon at the top left corner of the interface or pressing F1 on your keyboard.</li>
119
- <li>In the Manage mode, you can see a panel on the left side that shows various sources of your photos, such as folders, devices, cloud services, etc.</li>
120
- <li>To import photos from a folder on your computer or an external drive, you need to browse to that folder in the panel and select it.</li>
121
- <li>To import photos from a device such as a camera or a smartphone, you need to connect that device to your computer via USB cable or Wi-Fi and select it in the panel.</li>
122
- <li>To import photos from a cloud service such as OneDrive or Dropbox, you need to sign in to that service with your account credentials and select it in the panel.</li>
123
- the main area of the interface.</li>
124
- <li>To import photos from the grid to the database of ACDSee Ultimate v9.0.565 x64, you need to select the photos you want to import by clicking on them or using the Ctrl or Shift keys on your keyboard.</li>
125
- <li>Then, you need to click on the "Import" button at the top right corner of the interface or press Ctrl+I on your keyboard.</li>
126
- <li>This will open a window that lets you choose the destination folder, the file name format, the metadata options, and the import options for your photos.</li>
127
- <li>You can also create subfolders, rename files, add keywords, categories, ratings, and color labels to your photos in this window.</li>
128
- <li>Once you have configured the import settings for your photos, you need to click on "Start Import" at the bottom right corner of the window.</li>
129
- <li>This will import your photos to the database of ACDSee Ultimate v9.0.565 x64 and show them in the destination folder in the panel.</li>
130
- </ol>
131
- <p>Once you have imported your photos to the database of ACDSee Ultimate v9.0.565 x64, you can organize them in various ways.</p>
132
- <p>Some of the ways to organize your photos with ACDSee Ultimate v9.0.565 x64 are:</p>
133
- <ul>
134
- <li>You can sort your photos by various criteria, such as name, date, size, rating, etc., by clicking on the "Sort By" button at the top right corner of the interface or pressing Ctrl+T on your keyboard.</li>
135
- <li>You can filter your photos by various criteria, such as keywords, categories, ratings, color labels, etc., by clicking on the "Filter By" button at the top right corner of the interface or pressing Ctrl+F on your keyboard.</li>
136
- <li>You can group your photos by various criteria, such as date, camera model, lens model, etc., by clicking on the "Group By" button at the top right corner of the interface or pressing Ctrl+G on your keyboard.</li>
137
- <li>You can create collections of your photos based on common themes or projects by clicking on the "Collections" tab in the panel and dragging and dropping your photos to a new or existing collection.</li>
138
- <li>You can create smart collections of your photos based on dynamic rules that automatically update as you add or modify your photos by clicking on the "Smart Collections" tab in the panel and creating a new smart collection with a name and a set of rules.</li>
139
- </ul>
140
- <h3>How to edit your photos with ACDSee Ultimate v9.0.565 x64</h3>
141
- <p>The next thing you need to do with ACDSee Ultimate v9.0.565 x64 is to edit your photos to enhance their appearance and quality.</p>
142
- <p>To edit your photos with ACDSee Ultimate v9.0.565 x64, you need to follow these steps:</p>
143
- <ol>
144
- <li>Select a photo you want to edit in the Manage mode by clicking on it or using the arrow keys on your keyboard.</li>
145
- <li>Switch to the Develop mode by clicking on its icon at the top left corner of the interface or pressing F2 on your keyboard.</li>
146
- <li>In the Develop mode, you can see a panel on the right side that shows various tools and settings for processing RAW files and making non-destructive adjustments to your photo.</li>
147
- the bottom right corner of the window.</li>
148
- <li>This will export your photos to the destination folder or share them via email or social media, depending on your choice.</li>
149
- </ol>
150
- <h2>Conclusion</h2>
151
- <p>In this article, we have explained what ACDSee Ultimate is, what are its features, why you need a serial key and a keygen for it, how to download and install it, and how to use it for photo editing and management.</p>
152
- <p>We have also provided a link to download ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen, which is a torrent file that contains the full version of ACDSee Ultimate v9.0.565 x64, a serial key to activate it, and a keygen to generate more serial keys if needed.</p>
153
- <p>By using ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen, you can enjoy a powerful and versatile photo editing and management software for free.</p>
154
- <p>However, we do not endorse or support piracy or illegal downloading of software. If you like ACDSee Ultimate v9.0.565 x64 and find it useful, you should buy it from its official website: https://www.acdsee.com/en/products/acdsee-ultimate</p>
155
- <p>This way, you can support the developers of ACDSee Ultimate v9.0.565 x64 and get access to updates, support, and more features.</p>
156
- <h2>FAQs</h2>
157
- <p>Here are some frequently asked questions about ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen:</p>
158
- <ol>
159
- <li>Q: Is ACDSee Ultimate v9.0.565 x64 compatible with Windows 10?</li>
160
- <li>A: Yes, ACDSee Ultimate v9.0.565 x64 is compatible with Windows 10 and other versions of Windows from Windows 7 to Windows 8.1.</li>
161
- <li>Q: Is ACDSee Ultimate v9.0.565 x64 safe to download and use?</li>
162
- <li>A: ACDSee Ultimate v9.0.565 x64 [deepstatus] Serial Key keygen is safe to download and use as long as you download it from [deepstatus], which is a trusted source of cracked software torrents.</li>
163
- <li>Q: Can I use ACDSee Ultimate v9.0.565 x64 on multiple computers?</li>
164
- <li>A: Yes, you can use ACDSee Ultimate v9.0.565 x64 on multiple computers as long as you have a valid serial key and a keygen for each computer.</li>
165
- <li>Q: Can I update ACDSee Ultimate v9.0.565 x64 to a newer version?</li>
166
- <li>A: No, you cannot update ACDSee Ultimate v9.0.565 x64 to a newer version as it will invalidate your serial key and activation code.</li>
167
- <li>Q: Can I get technical support for ACDSee Ultimate v9.0.565 x64?</li>
168
- <li>A: No, you cannot get technical support for ACDSee Ultimate v9.0.565 x64 as it is an illegal copy of the software.</li>
169
- </ol>
170
- </p> 0a6ba089eb<br />
171
- <br />
172
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Command. .Conquer.Red.Alert2.Yuris.Revenge.REUP fitgirl repack Tips and tricks for mastering the game and defeating Yuri.md DELETED
@@ -1,17 +0,0 @@
- <br />
- <h1>Command and Conquer Red Alert 2 Yuri's Revenge: A Classic RTS Game</h1>
- <p>If you are a fan of real-time strategy (RTS) games, you have probably heard of <strong>Command and Conquer Red Alert 2 Yuri's Revenge</strong>. This game is an expansion pack to <strong>Command and Conquer Red Alert 2</strong>, one of the most popular and acclaimed RTS games of all time. In this article, we will tell you everything you need to know about this classic game, including what it is, why it is so good, how to play it on Windows 10, and more.</p>
- <h2>Command. .Conquer.Red.Alert2.Yuri's.Revenge.REUP fitgirl repack</h2><br /><p><b><b>DOWNLOAD</b> &#9675; <a href="https://byltly.com/2uKxKg">https://byltly.com/2uKxKg</a></b></p><br /><br />
- <h2>Introduction</h2>
- <p><strong>Command and Conquer Red Alert 2 Yuri's Revenge</strong> is an expansion pack to <strong>Command and Conquer Red Alert 2</strong>, a RTS game developed by Westwood Studios and published by Electronic Arts in October 2001 as part of the Command and Conquer series . The game is set in an alternate history where the Soviet Union has invaded the United States, but is defeated by the Allied forces. However, Yuri, the former Soviet advisor who betrayed his leader Romanov, has his own plans for world domination. He has built a secret army of mind-controlled soldiers and devices that can manipulate time and space. The Allies must stop him before he succeeds in his evil scheme.</p>
- <p><strong>Command and Conquer Red Alert 2 Yuri's Revenge</strong> is a classic RTS game because it offers a thrilling and engaging gameplay experience that combines strategy, action, humor, and creativity. The game has three playable factions, each with its own unique units, buildings, abilities, and campaign. The game also has a multiplayer mode that allows you to play online or offline with other players. The game has excellent graphics, sound, music, voice acting, and cutscenes that immerse you in the game world. The game also has a vibrant modding community that creates new content for the game.</p>
- <p>If you want to play <strong>Command and Conquer Red Alert 2 Yuri's Revenge</strong> on Windows 10, you will need to buy a digital copy of the game from the EA Origin Store or use your physical copy if you have one. You will also need to download and install the CnCNet patch that fixes many bugs and compatibility issues with Windows 10. After that, you can enjoy playing this classic game on your modern PC.</p>
- <h2>Gameplay</h2>
- <h3>Factions</h3>
- <p><strong>Command and Conquer Red Alert 2 Yuri's Revenge</strong> has three playable factions: Allies, Soviets, and Yuri. Each faction has its own strengths, weaknesses, strategies, and playstyles. Here is a brief overview of each faction:</p>
- <ul>
- <li><strong>Allies</strong>: The Allies are the defenders of freedom and democracy. They have advanced technology and versatile units that can adapt to different situations. They have access to units such as Prism Tanks, Mirage Tanks, Rocketeers, Chrono Legionnaires, Harriers, Dolphins, Aircraft Carriers, IFVs (which can change their weapons depending on what infantry they carry), Spies (which can disguise as enemy infantry), Engineers (which can capture enemy buildings), Tanya (a commando who can destroy buildings with C4 charges), Chrono Miner (a miner that can teleport to ore fields), Weather Control Device (a superweapon that can unleash a devastating lightning storm), Chronosphere (a superweapon that can teleport units anywhere on the map), Force Shield (a defensive structure that can create a temporary invulnerable barrier), Gap Generator (a defensive structure that can create a fog of war around itself), Robot Control Center (a building that allows you to control robotic units such as Terror Drones), Battle Fortress (a heavy transport vehicle that can carry five infantry units), Guardian GI (an anti-tank infantry unit that can deploy into a stationary turret), Sniper (an infantry unit that can kill enemy infantry with one shot), Navy SEAL (a commando who can swim underwater and plant C4 charges on ships), Night Hawk Transport (a stealth transport helicopter), Black Eagle (a fast jet fighter), Grand Cannon (a long-range defensive cannon).</li>
- <li><strong>Soviets</strong>: The Soviets are the aggressors of communism and tyranny. They have powerful weapons and brute force units that can overwhelm their enemies. They have access to units such as Rhino Tanks, Apocalypse Tanks, Kirov Airships, Tesla Troopers, Flak Troopers, Desolators (which can deploy into a radiation emitter), Terrorists (which can explode themselves near enemy units or buildings), Crazy Ivans (which can attach bombs to friendly or enemy units or buildings), Boris (a commando who can call airstrikes on enemy buildings), Tesla Coil (a defensive structure that can zap enemy units with electricity), Iron Curtain (a superweapon that can make units invulnerable for a short time), Nuclear Missile Silo (a superweapon that can launch a nuclear missile at any location on the map), Sentry Gun (a defensive structure that can shoot enemy infantry), Flak Cannon (a defensive structure that can shoot enemy aircraft), Industrial Plant (a building that reduces the cost of vehicles by 25%), Siege Chopper (a helicopter that can transform into an artillery unit), Tesla Tank (a tank that fires electric bolts), V3 Launcher (a long-range missile launcher), Terror Drone (a robotic unit that can infiltrate enemy vehicles and destroy them from within), Psychic Sensor (a building that reveals nearby enemy units on the radar), Nuclear Reactor (a power plant that provides more power than other power plants but explodes violently when destroyed).</li>
- <li><strong>Yuri</strong>: Yuri is the mastermind of mind control and psychic warfare. He has exotic units and buildings that can manipulate his enemies' minds or use unconventional tactics. He has access to units such as Lasher Tanks</p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Crack WinZip 24.0 What You Need to Know Before You Click.md DELETED
@@ -1,26 +0,0 @@
1
-
2
- <h1>How to Download Crack WinZip 24.0 for Free</h1>
3
- <p>WinZip 24.0 is a popular software that allows you to compress, encrypt, and backup your files. It can also help you save disk space, reduce email attachment size, and access your files from anywhere. However, WinZip 24.0 is not a free software and you need to buy a license to use it without any limitations.</p>
4
- <p>But what if you don't want to pay for a license? Is there a way to download crack WinZip 24.0 for free? The answer is yes, but it comes with some risks and drawbacks. In this article, we will explain what crack WinZip 24.0 is, how to download it, and why you should avoid it.</p>
5
- <h2>download crack winzip 24.0</h2><br /><p><b><b>Download</b> &#10037;&#10037;&#10037; <a href="https://byltly.com/2uKA8m">https://byltly.com/2uKA8m</a></b></p><br /><br />
6
- <h2>What is Crack WinZip 24.0?</h2>
7
- <p>Crack WinZip 24.0 is a modified version of WinZip 24.0 that bypasses the activation process and allows you to use the software without a license. Crack WinZip 24.0 usually comes with a keygen, which is a program that generates serial numbers and activation codes for the software.</p>
8
- <p>Crack WinZip 24.0 may sound tempting, but it is illegal and unethical to use. By using crack WinZip 24.0, you are violating the terms and conditions of the software and infringing the intellectual property rights of the developers. You are also depriving them of their rightful income and discouraging them from creating more quality products.</p>
9
- <h2>How to Download Crack WinZip 24.0?</h2>
10
- <p>If you still want to download crack WinZip 24.0, despite the legal and ethical issues, you need to be careful and cautious. Crack WinZip 24.0 is not available on the official website of WinZip or any authorized reseller. You can only find it on some shady websites or torrent sites that offer pirated software.</p>
11
- <p>To download crack WinZip 24.0, you need to search for it on Google or any other search engine and look for the links that claim to provide it. You may find some websites that offer direct downloads or some torrent sites that offer magnet links . You need to click on the link and follow the instructions to download the file.</p>
12
- <p>However, before you download crack WinZip 24.0, you need to be aware of the risks and drawbacks of doing so.</p>
13
- <h2>Why You Should Avoid Crack WinZip 24.0?</h2>
14
- <p>There are many reasons why you should avoid downloading crack WinZip 24.0 and use the official version instead. Here are some of them:</p>
15
- <p></p>
16
- <ul>
17
- <li><b>Crack WinZip 24.0 may contain viruses or malware</b>. Since crack WinZip 24.0 is not verified by any trusted source, there is no guarantee that it is safe and clean. It may contain viruses or malware that can harm your computer or steal your personal information.</li>
18
- <li><b>Crack WinZip 24.0 may not work properly or at all</b>. Since crack WinZip 24.0 is not updated or supported by the developers, there is no guarantee that it will work properly or at all. It may have bugs, errors, or compatibility issues that can affect your user experience or damage your files.</li>
19
- <li><b>Crack WinZip 24.0 may expose you to legal troubles</b>. Since crack WinZip 24.0 is illegal and unethical to use, you may face legal troubles if you are caught using it. You may be sued by the developers or prosecuted by the authorities for violating their rights and laws.</li>
20
- <li><b>Crack WinZip 24.0 may deprive you of the benefits of the official version</b>. Since crack WinZip 24.0 is not registered or activated by the developers, you may miss out on the benefits of the official version such as updates, support, features, security, etc.</li>
21
- </ul>
22
- <h2>Conclusion</h2>
23
- <p>WinZip 24.0 is a powerful software that can help you compress, encrypt, and backup your files. However, it is not a free software and you need to buy a license to use it without any limitations.</p>
24
- <p>If you want to download crack</p> ddb901b051<br />
25
- <br />
26
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/BIOMUTANT Password.md DELETED
@@ -1,13 +0,0 @@
- <h2>BIOMUTANT Password</h2><br /><p><b><b>DOWNLOAD</b> &#9745; <a href="https://imgfil.com/2uxXoB">https://imgfil.com/2uxXoB</a></b></p><br /><br />
-
- Just spotted this on the dashboard, Biomutant is part of a free game this week... with new passwords and a new username and password for this site only for ... BIOMUTANT.
- How can I find out who did it?
- Did they hack it?What do I need to do?
- I don't want them to be able to enter their passwords and download cheats.
- Does anyone know what i can do
- I don't know why this site uses the Biomutant username and password, but that shouldn't be a problem for you.
- If they are using it to hack, you will need to find a way to terminate them.
- It might be useful for you if you find a list of available IPs so you can track where they're coming from. 8a78ff9644<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Warcraft 3 Full _HOT_ Ko Can Cai Datl.md DELETED
@@ -1,8 +0,0 @@
- <br />
- <p>once the operation is completed, you will be taken to the results page which shows you the properties you selected. the properties are listed alphabetically, which makes the process of selecting a property to buy easy. the prices listed also include a premium for a high end property. this is clearly displayed under the price column.</p>
- <p>however, there are three additional features included that make the download tool pretty useful. firstly, the search feature is very easy to use. here, you can search for properties by the county in which they are located. you can also narrow down the search results by the number of rooms and the number of off-street parking spots. finally, the browse feature displays properties by counties and property owners.</p>
- <h2>Download Warcraft 3 Full Ko Can Cai Datl</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash;>>> <a href="https://imgfil.com/2uxX6J">https://imgfil.com/2uxX6J</a></b></p><br /><br />
- <p>the final useful feature of the download tool is the ability to create property alerts. this feature enables you to keep up to date with all of the properties you are interested in and what price they have sold for over the past 24 hours.</p>
- <p>ebook is the much more popular word than digital book, of course, but for a different reason. in most cases, a book is something you read or buy. a digital book is essentially something that you download. while this much more common now, it takes a while for the distinction to disappear and it is pretty rare to find a book that isn't also available as a digital book. these days, a digital book is something that you can read on your e-reader or tablet, in which case it is called an e-book.<br /> a lot of people like to actually read the words on an actual paper book and the advent of e-books didn't kill this desire. it just means that you can read the book wherever you want to, from any device that is capable of connecting to the web.</p> 899543212b<br />
- <br />
- <br />
spaces/1line/AutoGPT/tests/browse_tests.py DELETED
@@ -1,26 +0,0 @@
- import os
- import sys
- import unittest
-
- from bs4 import BeautifulSoup
-
- sys.path.append(os.path.abspath("../scripts"))
-
- from browse import extract_hyperlinks
-
-
- class TestBrowseLinks(unittest.TestCase):
-     def test_extract_hyperlinks(self):
-         body = """
-         <body>
-         <a href="https://google.com">Google</a>
-         <a href="foo.html">Foo</a>
-         <div>Some other crap</div>
-         </body>
-         """
-         soup = BeautifulSoup(body, "html.parser")
-         links = extract_hyperlinks(soup, "http://example.com")
-         self.assertEqual(
-             links,
-             [("Google", "https://google.com"), ("Foo", "http://example.com/foo.html")],
-         )
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/3D Driving Class Game A Fun and Educational Driving Game for All Ages.md DELETED
@@ -1,147 +0,0 @@
1
-
2
- <h1>Download 3D Driving Class Game: A Fun and Realistic Way to Learn How to Drive</h1>
3
- <p>Do you want to learn how to drive a car in a fun and realistic way? Do you want to practice your driving skills in various scenarios and situations? Do you want to compete with other drivers online and show off your driving abilities? If you answered yes to any of these questions, then you should download 3D Driving Class game, a simulation app that lets you experience driving in a virtual world.</p>
4
- <h2>What is 3D Driving Class Game?</h2>
5
- <p>3D Driving Class game is a simulation app that allows you to practice driving in a virtual world. You can choose from different cars and maps, follow the traffic rules, take driving tests, and earn points. You can also join the online multiplayer mode, chat with other drivers, challenge yourself with different driving modes, and compare your scores with other players on the leaderboards.</p>
6
- <h2>download 3d driving class game</h2><br /><p><b><b>DOWNLOAD</b> &#128504; <a href="https://urlin.us/2uSUWC">https://urlin.us/2uSUWC</a></b></p><br /><br />
7
- <h3>Features of 3D Driving Class Game</h3>
8
- <h4>- Realistic driving scenarios and traffic rules</h4>
9
- <p>One of the main features of 3D Driving Class game is that it offers realistic driving scenarios and traffic rules. You can experience driving in various environments, such as city, countryside, highway, mountain, desert, snow, night, rain, etc. You can also follow the traffic rules, such as speed limit, traffic lights, stop signs, lane change, parking, etc. You can learn how to drive safely and legally in different situations.</p>
10
- <h4>- Various cars and maps to choose from</h4>
11
- <p>Another feature of 3D Driving Class game is that it offers various cars and maps to choose from. You can select from different car models, such as sedan, hatchback, SUV, sports car, bus, truck, etc. You can also customize your car's color, engine, tires, etc. You can also select from different maps, such as Seoul, Busan, Jeju Island, etc. You can explore different places and enjoy the scenery.</p>
12
- <h4>- Online multiplayer mode and leaderboards</h4>
13
- <p>A third feature of 3D Driving Class game is that it offers online multiplayer mode and leaderboards. You can join the online multiplayer mode and chat with other drivers. You can also challenge yourself with different driving modes, such as free drive, time trial, racing, drifting, etc. You can also compare your scores and rankings with other players on the leaderboards. You can show off your driving skills and have fun with other drivers.</p>
14
- <h2>How to Download 3D Driving Class Game?</h2>
15
- <p>If you are interested in downloading 3D Driving Class game, you can do so for your Android, iOS, or PC devices. Here are the steps and requirements for each device:</p>
16
- <h3>Download 3D Driving Class Game for Android</h3>
17
- <h4>- Steps to download from Google Play Store</h4>
18
- <p>If you have an Android device, you can download 3D Driving Class game from the Google Play Store. Here are the steps to do so:</p>
19
- <ol>
20
- <li>Open the Google Play Store app on your Android device.</li>
21
- <li>Search for "3D Driving Class" in the search bar.</li>
22
- <li>Select the app from the list of results and tap on "Install".</li>
23
- <li>Wait for the app to download and install on your device.</li>
24
- <li>Launch the app and enjoy playing 3D Driving Class game.</li>
25
- </ol>
26
- <h4>- Requirements and permissions for Android devices</h4>
27
- <p>To download and play 3D Driving Class game on your Android device, you need to meet the following requirements and permissions:</p>
28
- <p>Download 3D Driving Class - Apps on Google Play<br />
29
- Download Fast&Grand 3D Driving Class Online for Free<br />
30
- Download Driving School 3D Simulator Game<br />
31
- Download 3D Driving Class for PC Windows 10/8/7<br />
32
- Download 3D Driving Class Mod APK Unlimited Money<br />
33
- Download 3D Driving Class Korea Superconducting Tokamak<br />
34
- Download 3D Driving Class Car Truck Driving Mario<br />
35
- Download 3D Driving Class Latest Version 2023<br />
36
- Download 3D Driving Class Offline Data Safety<br />
37
- Download 3D Driving Class Ratings and Reviews<br />
38
- Download 3D Driving Class Simulation Vehicle Car Casual<br />
39
- Download 3D Driving Class John 3:16 Music from Arc North<br />
40
- Download 3D Driving Class YouTube Channel Link<br />
41
- Download 3D Driving Class Airport Airbag New Map<br />
42
- Download 3D Driving Class Car Repair Button Impact Effect<br />
43
- Download 3D Driving Class Crazy Games Game Description<br />
44
- Download 3D Driving Class Cars from France and Germany<br />
45
- Download 3D Driving Class Customize Autos Online<br />
46
- Download 3D Driving Class Compete with Real Drivers<br />
47
- Download 3D Driving Class Seven Stunning Locations<br />
48
- Download 3D Driving Class Explore World's Famous Cities<br />
49
- Download 3D Driving Class Intestinal Functional Test Road Test<br />
50
- Download 3D Driving Class Deduction Points Items Applied<br />
51
- Download 3D Driving Class Single Player Stylized Offline<br />
52
- Download 3D Driving Class No Data Shared Collected Privacy<br />
53
- Download 3D Driving Class Online Multiplayer Free-Roaming Mode<br />
54
- Download 3D Driving Class Traffic Conditions in Real Life<br />
55
- Download 3D Driving Class Obey Traffic Laws Extra Bonuses<br />
56
- Download 3D Driving Class Pit Yourself Against Other Players<br />
57
- Download 3D Driving Class Single Player Campaign Change of Pace<br />
58
- Download 3D Driving Class Twenty-Eight Unique Vehicles Categories<br />
59
- Download 3D Driving Class Pickup Trucks Muscle Car Supercar<br />
60
- Download 3D Driving Class Test Your Skills in Various Vehicles<br />
61
- Download 3D Driving Class Real World Traffic Visiting Cities<br />
62
- Download 3D Driving Class Feedback from Fans Highly Rated <br />
63
- Download 3D Driving Class Exciting New Features Updates <br />
64
- Download 3D Driving Class Web Browser Platform Release Date <br />
65
- Download 3D Driving Class iPad App Store Link Description <br />
66
- Download 3D Driving Class Experience Realistic Driving Simulator <br />
67
- Download 3D Driving Class Practice Your Skills in Various Environments</p>
68
- <ul>
69
- <li>Your device must have Android 4.1 or higher version.</li>
70
- <li>Your device must have at least 100 MB of free storage space.</li>
71
- <li>Your device must have a stable internet connection.</li>
72
- <li>Your device must allow access to photos, media, files, camera, microphone, location, and other features.</li>
73
- </ul>
74
- <h3>Download 3D Driving Class Game for iOS</h3>
75
- <h4>- Steps to download from App Store</h4>
76
- <p>If you have an iOS device, you can download 3D Driving Class game from the App Store. Here are the steps to do so:</p>
77
- <ol>
78
- <li>Open the App Store app on your iOS device.</li>
79
- <li>Search for "3D Driving Class" in the search bar.</li>
80
- <li>Select the app from the list of results and tap on "Get".</li>
81
- <li>Enter your Apple ID and password if prompted.</li>
82
- <li>Wait for the app to download and install on your device.</li>
83
- <li>Launch the app and enjoy playing 3D Driving Class game.</li>
84
- </ol>
85
- <h4>- Requirements and permissions for iOS devices</h4>
86
- <p>To download and play 3D Driving Class game on your iOS device, you need to meet the following requirements and permissions:</p>
87
- <ul>
88
- <li>Your device must have iOS 9.0 or higher version.</li>
89
- <li>Your device must have at least 100 MB of free storage space.</li>
90
- <li>Your device must have a stable internet connection.</li>
91
- <li>Your device must allow access to photos, media, files, camera, microphone, location, and other features.</li>
92
- </ul> <h3>Download 3D Driving Class Game for PC</h3>
93
- <h4>- Steps to download from Web Browser</h4>
94
- <p>If you have a PC device, you can download 3D Driving Class game from the Web Browser. Here are the steps to do so:</p>
95
- <ol>
96
- <li>Open your Web Browser on your PC device.</li>
97
- <li>Go to the official website of 3D Driving Class game: <a href="">https://www.3ddrivingclass.com/</a></li>
98
- <li>Click on the "Download" button on the homepage.</li>
99
- <li>Select the version of the game that suits your PC device (Windows or Mac).</li>
100
- <li>Save the file to your preferred location on your PC device.</li>
101
- <li>Run the file and follow the instructions to install the game on your PC device.</li>
102
- <li>Launch the game and enjoy playing 3D Driving Class game.</li>
103
- </ol>
104
- <h4>- Requirements and permissions for PC devices</h4>
105
- <p>To download and play 3D Driving Class game on your PC device, you need to meet the following requirements and permissions:</p>
106
- <ul>
107
- <li>Your device must have Windows 7 or higher version or Mac OS X 10.9 or higher version.</li>
108
- <li>Your device must have at least 2 GB of RAM and 500 MB of free storage space.</li>
109
- <li>Your device must have a stable internet connection.</li>
110
- <li>Your device must allow access to photos, media, files, camera, microphone, location, and other features.</li>
111
- </ul>
112
- <h2>Tips and Tricks for Playing 3D Driving Class Game</h2>
113
- <p>Now that you have downloaded 3D Driving Class game, you might be wondering how to play it and improve your driving skills. Here are some tips and tricks for playing 3D Driving Class game:</p>
114
- <h3>How to Pass the Driving Tests and Earn Points</h3>
115
- <h4>- Follow the traffic rules and signs</h4>
116
- <p>The first tip for playing 3D Driving Class game is to follow the traffic rules and signs. You can learn how to drive safely and legally by following the speed limit, traffic lights, stop signs, lane change, parking, etc. You can also take driving tests that will evaluate your driving knowledge and skills. You can earn points by passing the driving tests and completing the missions.</p>
117
- <h4>- Use the indicators, mirrors, and brakes properly</h4>
118
- <p>The second tip for playing 3D Driving Class game is to use the indicators, mirrors, and brakes properly. You can learn how to drive smoothly and efficiently by using the indicators, mirrors, and brakes properly. You can use the indicators to signal your intentions to other drivers, such as turning left or right, changing lanes, etc. You can use the mirrors to check your surroundings and blind spots, such as rear view mirror, side mirror, etc. You can use the brakes to slow down or stop your car safely, such as normal brake, emergency brake, etc.</p>
119
- <h4>- Avoid collisions and accidents</h4>
120
- <p>The third tip for playing 3D Driving Class game is to avoid collisions and accidents. You can learn how to drive carefully and responsibly by avoiding collisions and accidents. You can avoid collisions and accidents by keeping a safe distance from other cars, pedestrians, animals, objects, etc. You can also avoid collisions and accidents by driving at a reasonable speed, avoiding distractions, being alert, etc.</p> <h3>How to Customize Your Car and Map</h3>
121
- <h4>- Choose from different car models and colors</h4>
122
- <p>The fourth tip for playing 3D Driving Class game is to choose from different car models and colors. You can customize your car according to your preference and style by choosing from different car models and colors. You can choose from different car models, such as sedan, hatchback, SUV, sports car, bus, truck, etc. You can also choose from different car colors, such as red, blue, green, yellow, black, white, etc.</p>
123
- <h4>- Select from various maps and environments</h4>
124
- <p>The fifth tip for playing 3D Driving Class game is to select from various maps and environments. You can customize your map according to your interest and mood by selecting from various maps and environments. You can select from various maps, such as Seoul, Busan, Jeju Island, etc. You can also select from various environments, such as city, countryside, highway, mountain, desert, snow, night, rain, etc.</p>
125
- <h4>- Adjust the camera angle and sound settings</h4>
126
- <p>The sixth tip for playing 3D Driving Class game is to adjust the camera angle and sound settings. You can customize your view and sound according to your comfort and convenience by adjusting the camera angle and sound settings. You can adjust the camera angle to change the perspective of your driving, such as first-person view, third-person view, top-down view, etc. You can also adjust the sound settings to change the volume and quality of your driving, such as engine sound, horn sound, music sound, etc.</p>
127
- <h3>How to Compete with Other Players Online</h3>
128
- <h4>- Join the online multiplayer mode and chat with other drivers</h4>
129
- <p>The seventh tip for playing 3D Driving Class game is to join the online multiplayer mode and chat with other drivers. You can have fun and socialize with other drivers by joining the online multiplayer mode and chat with other drivers. You can join the online multiplayer mode by tapping on the "Online" button on the main menu. You can chat with other drivers by tapping on the "Chat" button on the screen. You can also send emojis and stickers to express your emotions.</p>
130
- <h4>- Challenge yourself with different driving modes and levels</h4>
131
- <p>The eighth tip for playing 3D Driving Class game is to challenge yourself with different driving modes and levels. You can test your driving skills and improve your driving performance by challenging yourself with different driving modes and levels. You can challenge yourself with different driving modes, such as free drive, time trial, racing, drifting, etc. You can also challenge yourself with different driving levels, such as beginner, intermediate, advanced, expert, etc.</p>
132
- <h4>- Compare your scores and rankings with other players on the leaderboards</h4>
133
- <p>The ninth tip for playing 3D Driving Class game is to compare your scores and rankings with other players on the leaderboards. You can measure your progress and achievements by comparing your scores and rankings with other players on the leaderboards. You can compare your scores by tapping on the "Score" button on the screen. You can also compare your rankings by tapping on the "Ranking" button on the screen. You can see how you rank among other players in terms of points, tests passed, missions completed, etc.</p>
134
- <h2>Conclusion</h2>
135
- <p>In conclusion, 3D Driving Class game is a simulation app that lets you experience driving in a virtual world. You can learn how to drive safely and legally in different scenarios and situations. You can also customize your car and map according to your preference and style. You can also have fun and socialize with other drivers online by joining the online multiplayer mode and chat with other drivers. You can also test your driving skills and improve your driving performance by challenging yourself with different driving modes and levels. You can also measure your progress and achievements by comparing your scores and rankings with other players on the leaderboards.</p>
136
- <p>If you are looking for a fun and realistic way to learn how to drive a car, you should download 3D Driving Class game today. It is a simulation app that will teach you how to drive in a virtual world.</p>
137
- <h2>FAQs</h2>
138
- <p>Here are some frequently asked questions about 3D Driving Class game:</p>
139
- <ol>
140
- <li>Q: Is 3D Driving Class game free to download and play?<br>A: Yes, 3D Driving Class game is free to download and play. However, it may contain ads and in-app purchases that require real money.</li>
141
- <li>Q: Is 3D Driving Class game suitable for children?<br>A: Yes, 3D Driving Class game is suitable for children who want to learn how to drive a car in a fun way. However, parental guidance is recommended for some of the content and features of the game.</li>
142
- <li>Q: How can I contact the developer of 3D Driving Class game?<br>A: You can contact the developer of 3D Driving Class game by sending an email to <a href="mailto:[email protected]">[email protected]</a> or by visiting their Facebook page: <a href="https://www.facebook.com/3ddrivingclass/">https://www.facebook.com/3ddrivingclass/</a></li>
143
- <li>Q: How can I update 3D Driving Class game?<br>A: You can update 3D Driving Class game by checking for updates on the Google Play Store, App Store, or Web Browser. You can also enable automatic updates on your device settings.</li>
144
- <li>Q: How can I uninstall 3D Driving Class game?<br>A: You can uninstall 3D Driving Class game by following the usual steps for uninstalling apps on your device. You can also delete the app data and cache on your device settings.</li>
145
- </ol>
spaces/1phancelerku/anime-remove-background/Enjoy Mortal Kombat 4 APK on Your Android Phone or Tablet.md DELETED
@@ -1,102 +0,0 @@
1
-
2
- <h1>Mortal Kombat 4 APKCombo: How to Download and Play the Classic Fighting Game on Your Android Device</h1>
3
- <h2>Introduction</h2>
4
- <p>If you are a fan of fighting games, you probably have heard of Mortal Kombat, one of the most popular and influential franchises in the genre. Mortal Kombat is known for its brutal and bloody combat, its iconic characters, and its rich lore and story. One of the best entries in the series is Mortal Kombat 4, which was released in 1997 for arcades and later ported to various platforms, including PlayStation, Nintendo 64, PC, and Game Boy Color.</p>
5
- <p>Mortal Kombat 4 was the first game in the series to use 3D graphics, while still retaining the classic 2D gameplay. It also introduced a weapon system, allowing each character to use a special weapon during fights. The game features 15 playable characters, plus two secret ones, and a variety of stages and modes. The story revolves around the attack of the corrupted Elder God Shinnok, who seeks to conquer all the realms with the help of his loyal servant Quan Chi. The heroes of Earthrealm must stop him and his army of darkness before it is too late.</p>
6
- <h2>mortal kombat 4 apkcombo</h2><br /><p><b><b>DOWNLOAD</b> &#9989; <a href="https://jinyurl.com/2uNNxt">https://jinyurl.com/2uNNxt</a></b></p><br /><br />
7
- <p>But what if you want to play Mortal Kombat 4 on your Android device? Is it possible? The answer is yes, thanks to APKCombo, a website that allows you to download APK files for Android games and apps. In this article, we will show you how to download and play Mortal Kombat 4 APKCombo on your Android device, so you can enjoy this classic fighting game anytime and anywhere.</p>
8
- <h2>How to Download Mortal Kombat 4 APKCombo</h2>
9
- <h3>Step 1: Visit the APKCombo website</h3>
10
- <p>The first step is to visit the <a href="(^1^)">APKCombo website</a>, which is a reliable source for downloading APK files for Android games and apps. APK files are the installation files for Android applications, similar to EXE files for Windows. By downloading APK files from APKCombo, you can access games and apps that are not available on Google Play Store, or that are not compatible with your device.</p>
11
- <h3>Step 2: Search for Mortal Kombat 4</h3>
12
- <p>The next step is to search for Mortal Kombat 4 on the APKCombo website. You can use the search bar at the top of the page, or browse through the categories and filters. You will see a list of results related to Mortal Kombat 4, such as <a href="(^2^)">mortal kombat armagedon</a>, <a href="(^3^)">Guide Mortal Kombat 4</a>, and others. You want to select the one that says <strong>Mortal Kombat</strong>, which is the official name of Mortal Kombat 4 on Android.</p>
13
- <h3>Step 3: Choose the version and device compatibility</h3>
14
- <p>Once you select Mortal Kombat, you will see a page with more information about the game, such as its description, screenshots, ratings, reviews, and more. You will also see a section that says <strong>Download APK - Latest Version</strong>, which shows you the latest version of the game available for download. You can click on it to see more details about the version, such as its size, date, requirements, and compatibility.</p> <p>Before you download the APK file, you should check if it is compatible with your device. You can do this by looking at the <strong>Compatibility</strong> section, which shows you the minimum Android version and the supported architectures for the APK file. For example, if your device has Android 5.0 or higher and an ARMv7 processor, you can download the APK file that says <strong>Android 5.0+ (arm-v7a)</strong>. If your device has a different Android version or architecture, you should look for another APK file that matches your device.</p>
15
- <p>If you are not sure about your device's Android version or architecture, you can check them by going to your device's settings and looking for the <strong>About phone</strong> or <strong>About device</strong> option. There, you will see information such as your device's model, software version, kernel version, and more. You can also use apps like <a href="">CPU-Z</a> or <a href="">Droid Hardware Info</a> to get more details about your device's specifications.</p>
16
- <h3>Step 4: Download the APK file</h3>
17
- <p>Once you have chosen the APK file that is compatible with your device, you can download it by clicking on the <strong>Download APK</strong> button. You will see a pop-up window that asks you to confirm the download and shows you the file name and size. You can click on <strong>OK</strong> to start the download, or <strong>Cancel</strong> to abort it.</p>
18
- <p>The download will start in the background and you can see the progress in your notification bar. Depending on your internet speed and the file size, the download may take a few minutes or longer. You can also pause or resume the download by tapping on the notification.</p>
19
- <p>mortal kombat 4 apk download<br />
20
- mortal kombat 4 apk mod<br />
21
- mortal kombat 4 apk android<br />
22
- mortal kombat 4 apk obb<br />
23
- mortal kombat 4 apk offline<br />
24
- mortal kombat 4 apk free<br />
25
- mortal kombat 4 apk full<br />
26
- mortal kombat 4 apk data<br />
27
- mortal kombat 4 apk latest version<br />
28
- mortal kombat 4 apk revdl<br />
29
- mortal kombat 4 apk for pc<br />
30
- mortal kombat 4 apk no root<br />
31
- mortal kombat 4 apk unlimited money<br />
32
- mortal kombat 4 apk pure<br />
33
- mortal kombat 4 apk old version<br />
34
- mortal kombat 4 apk hack<br />
35
- mortal kombat 4 apk game<br />
36
- mortal kombat 4 apk mirror<br />
37
- mortal kombat 4 apk uptodown<br />
38
- mortal kombat 4 apk mob.org<br />
39
- mortal kombat 4 apk andropalace<br />
40
- mortal kombat 4 apk rexdl<br />
41
- mortal kombat 4 apk highly compressed<br />
42
- mortal kombat 4 apk mega<br />
43
- mortal kombat 4 apk mediafire<br />
44
- mortal kombat 4 apk android oyun club<br />
45
- mortal kombat 4 apk + sd data<br />
46
- mortal kombat 4 apk + data download<br />
47
- mortal kombat 4 apk + obb download<br />
48
- mortal kombat 4 apk + mod download<br />
49
- mortal kombat 4 apkpure download<br />
50
- mortal kombat 4 apkmirror download<br />
51
- mortal kombat 4 apkpure.com download<br />
52
- mortal kombat 4 apkmirror.com download<br />
53
- mortal kombat 4 apkpure mod<br />
54
- mortal kombat 4 apkmirror mod<br />
55
- mortal kombat 4 apkpure hack<br />
56
- mortal kombat 4 apkmirror hack<br />
57
- mortal kombat 4 apkpure offline<br />
58
- mortal kombat 4 apkmirror offline<br />
59
- mortal kombat 4 apkpure online<br />
60
- mortal kombat 4 apkmirror online<br />
61
- mortal kombat 4 apkpure update<br />
62
- mortal kombat 4 apkmirror update<br />
63
- how to download mortal kombat 4 apkcombo <br />
64
- how to install mortal kombat 4 apkcombo <br />
65
- how to play mortal kombat 4 apkcombo <br />
66
- how to run mortal kombat 4 apkcombo <br />
67
- how to get mortal kombat 4 apkcombo</p>
68
- <h3>Step 5: Install the APK file</h3>
69
- <p>After the download is complete, you can install the APK file by tapping on the notification or by using a file manager app to locate the file in your device's storage. Before you install the APK file, you may need to enable the option to install apps from unknown sources on your device. This is because APK files are not from Google Play Store and may pose a security risk to your device.</p>
70
- <p>To enable this option, go to your device's settings and look for the <strong>Security</strong> or <strong>Privacy</strong> option. There, you will see an option that says <strong>Unknown sources</strong>, <strong>Allow installation of apps from unknown sources</strong>, or something similar. Turn on this option and confirm your choice by tapping on <strong>OK</strong>. You may also see a warning message that tells you about the potential risks of installing apps from unknown sources. Read it carefully and tap on <strong>OK</strong>.</p>
71
- <p>Now, you can install the APK file by tapping on it and following the instructions on the screen. You may see a screen that shows you the permissions that the app requires, such as access to your storage, camera, microphone, etc. Review them carefully and tap on <strong>Install</strong>. The installation will take a few seconds or longer, depending on your device's performance.</p>
72
- <h2>How to Play Mortal Kombat 4 APKCombo</h2>
73
- <h3>Step 1: Launch the game</h3>
74
- <p>After installing the APK file, you can launch the game by tapping on its icon on your home screen or app drawer. You will see a splash screen with the game's logo and then a loading screen with some tips and trivia. Wait for the game to load completely and then tap on the screen to continue.</p>
75
- <h3>Step 2: Choose your game mode and character</h3>
76
- <p>You will see a main menu with several options, such as <strong>New Game</strong>, <strong>Arcade Mode</strong>, <strong>Versus Mode</strong>, <strong>Tournament Mode</strong>, <strong>Practice Mode</strong>, <strong>Cheat Menu</strong>, and <strong>Options</strong>. You can choose any of these options depending on your preference and mood.</p>
77
- <p>If you want to play a single-player campaign with a storyline and cutscenes, choose <strong>New Game</strong>. If you want to play a classic arcade mode with a series of fights against different opponents, choose <strong>Arcade Mode</strong>. If you want to play against another player on the same device, choose <strong>Versus Mode</strong>. If you want to play in a tournament with up to 8 players on the same device, choose <strong>Tournament Mode</strong>. If you want to practice your moves and combos without any pressure, choose <strong>Practice Mode</strong>. If you want to access some hidden features and cheats, such as changing the difficulty, unlocking characters, and enabling fatalities, choose <strong>Cheat Menu</strong>. If you want to adjust some settings, such as the sound, the controls, and the language, choose <strong>Options</strong>.</p>
78
- <p>After choosing your game mode, you will see a character selection screen with 15 playable characters, plus two secret ones. You can scroll through the characters by swiping left or right on the screen, or by using the arrows at the bottom. You can also see some information about each character, such as their name, origin, fighting style, and weapon. To select a character, tap on their portrait and confirm your choice by tapping on <strong>OK</strong>.</p>
79
- <h3>Step 3: Enjoy the gameplay and features</h3>
80
- <p>Once you have selected your character, you will see a stage selection screen with various locations to fight in. You can choose any stage by tapping on it, or let the game choose one randomly by tapping on <strong>Random</strong>. After choosing your stage, the game will start and you will see your character and your opponent facing each other.</p>
81
- <p>The gameplay of Mortal Kombat 4 APKCombo is similar to the original game, with some adaptations for the touch screen. You can move your character by using a virtual joystick on the left side of the screen, and perform attacks by using buttons on the right side of the screen. You can also use gestures to perform special moves and combos, such as swiping up, down, left, or right. You can see your health bar and your weapon bar at the top of the screen, and your opponent's health bar and weapon bar at the bottom of the screen.</p>
82
- <p>The goal of each fight is to deplete your opponent's health bar before they deplete yours. You can do this by using various attacks, such as punches, kicks, throws, weapons, and special moves. Each character has their own set of moves and abilities that you can discover by experimenting or by consulting the <strong>Move List</strong> option in the pause menu. You can also block your opponent's attacks by pressing the <strong>Block</strong> button on the right side of the screen.</p>
83
- <p>Each fight consists of two rounds, and you need to win two rounds to win the fight. If both you and your opponent have the same amount of health at the end of a round, it will result in a draw and a third round will be played. If you win a fight, you will see a victory pose and a message from your character. If you lose a fight, you will see a defeat pose and a message from your opponent.</p>
84
- <p>One of the most distinctive features of Mortal Kombat 4 APKCombo is the <strong>Fatality</strong>, which is a finishing move that you can perform on your opponent after winning the final round. A fatality is a brutal and gruesome attack that kills your opponent in a spectacular way, such as ripping their head off, impaling them with a weapon, or burning them alive. To perform a fatality, you need to follow a specific sequence of buttons or gestures that varies for each character and each stage. You can find out how to perform fatalities by using the <strong>Cheat Menu</strong> option in the main menu.</p>
85
- <h2>Conclusion</h2>
86
- <p>Mortal Kombat 4 APKCombo is a great way to enjoy one of the best fighting games ever made on your Android device. It offers an authentic and faithful adaptation of the original game, with stunning 3D graphics, smooth gameplay, and tons of features and modes. You can download and play Mortal Kombat 4 APKCombo for free from APKCombo website, which is a reliable source for APK files for Android games and apps. All you need to do is follow the steps we have shown you in this article, and you will be ready to enter the realm of Mortal Kombat.</p>
87
- <h2>FAQs</h2>
88
- <p>Here are some frequently asked questions about Mortal Kombat 4 APKCombo:</p>
89
- <ul>
90
- <li><strong>Q: Is Mortal Kombat 4 APKCombo safe to download and install?</strong></li>
91
- <li><strong>A: Yes,</strong> Mortal Kombat 4 APKCombo is safe to download and install from APKCombo website, which is a trusted source for APK files for Android games and apps. However, you should always be careful when downloading APK files from unknown sources, as they may contain malware or viruses that can harm your device. You should also scan any APK file with an antivirus app before installing it.</li>
92
- <li><strong>Q: Is Mortal Kombat 4 APKCombo legal to download and play?</strong></li>
93
- <li><strong>A: Yes,</strong> Mortal Kombat 4 APKCombo is legal to download and play, as long as you own the original game or have the permission of the developer or publisher. APKCombo does not host or distribute any pirated or illegal content, and only provides links to APK files that are available on other websites. However, you should always respect the intellectual property rights of the creators and owners of the games and apps you download and play.</li>
94
- <li><strong>Q: How can I update Mortal Kombat 4 APKCombo to the latest version?</strong></li>
95
- <li><strong>A: To update Mortal Kombat 4 APKCombo to the latest version,</strong> you need to visit the APKCombo website again and look for the latest version of the game available for download. You can also use the <strong>Update</strong> option in the game's menu, which will redirect you to the APKCombo website. You can then download and install the new APK file over the old one, without losing your progress or data.</li>
96
- <li><strong>Q: How can I uninstall Mortal Kombat 4 APKCombo from my device?</strong></li>
97
- <li><strong>A: To uninstall Mortal Kombat 4 APKCombo from your device,</strong> you need to go to your device's settings and look for the <strong>Apps</strong> or <strong>Applications</strong> option. There, you will see a list of all the apps and games installed on your device. You can scroll through the list and find Mortal Kombat 4 APKCombo, and then tap on it. You will see a screen that shows you some information about the app, such as its size, permissions, storage, and more. You can also see an option that says <strong>Uninstall</strong>. Tap on it and confirm your choice by tapping on <strong>OK</strong>. The app will be removed from your device in a few seconds.</li>
98
- <li><strong>Q: Where can I find more information and tips about Mortal Kombat 4 APKCombo?</strong></li>
99
- <li><strong>A: If you want to find more information and tips about Mortal Kombat 4 APKCombo,</strong> you can visit the official website of the game, which is <a href="">https://www.mortalkombat.com/</a>. There, you can find news, updates, videos, screenshots, wallpapers, forums, and more. You can also visit some fan websites and blogs, such as <a href="">https://www.mortalkombatwarehouse.com/</a>, <a href="">https://www.mksecrets.net/</a>, and <a href="">https://www.mortalkombatonline.com/</a>. There, you can find guides, cheats, secrets, trivia, fan art, fan fiction, and more.</li>
100
- </ul>
spaces/4Taps/SadTalker/README.md DELETED
@@ -1,15 +0,0 @@
1
- ---
2
- title: SadTalker
3
- emoji: 😭
4
- colorFrom: purple
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.23.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- duplicated_from: vinthony/SadTalker
12
- ---
13
-
14
-
15
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/801artistry/RVC801/demucs/audio.py DELETED
@@ -1,172 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
- import json
7
- import subprocess as sp
8
- from pathlib import Path
9
-
10
- import julius
11
- import numpy as np
12
- import torch
13
-
14
- from .utils import temp_filenames
15
-
16
-
17
- def _read_info(path):
18
- stdout_data = sp.check_output([
19
- 'ffprobe', "-loglevel", "panic",
20
- str(path), '-print_format', 'json', '-show_format', '-show_streams'
21
- ])
22
- return json.loads(stdout_data.decode('utf-8'))
23
-
24
-
25
- class AudioFile:
26
- """
27
- Allows to read audio from any format supported by ffmpeg, as well as resampling or
28
- converting to mono on the fly. See :method:`read` for more details.
29
- """
30
- def __init__(self, path: Path):
31
- self.path = Path(path)
32
- self._info = None
33
-
34
- def __repr__(self):
35
- features = [("path", self.path)]
36
- features.append(("samplerate", self.samplerate()))
37
- features.append(("channels", self.channels()))
38
- features.append(("streams", len(self)))
39
- features_str = ", ".join(f"{name}={value}" for name, value in features)
40
- return f"AudioFile({features_str})"
41
-
42
- @property
43
- def info(self):
44
- if self._info is None:
45
- self._info = _read_info(self.path)
46
- return self._info
47
-
48
- @property
49
- def duration(self):
50
- return float(self.info['format']['duration'])
51
-
52
- @property
53
- def _audio_streams(self):
54
- return [
55
- index for index, stream in enumerate(self.info["streams"])
56
- if stream["codec_type"] == "audio"
57
- ]
58
-
59
- def __len__(self):
60
- return len(self._audio_streams)
61
-
62
- def channels(self, stream=0):
63
- return int(self.info['streams'][self._audio_streams[stream]]['channels'])
64
-
65
- def samplerate(self, stream=0):
66
- return int(self.info['streams'][self._audio_streams[stream]]['sample_rate'])
67
-
68
- def read(self,
69
- seek_time=None,
70
- duration=None,
71
- streams=slice(None),
72
- samplerate=None,
73
- channels=None,
74
- temp_folder=None):
75
- """
76
- Slightly more efficient implementation than stempeg,
77
- in particular, this will extract all stems at once
78
- rather than having to loop over one file multiple times
79
- for each stream.
80
-
81
- Args:
82
- seek_time (float): seek time in seconds or None if no seeking is needed.
83
- duration (float): duration in seconds to extract or None to extract until the end.
84
- streams (slice, int or list): streams to extract, can be a single int, a list or
85
- a slice. If it is a slice or list, the output will be of size [S, C, T]
86
- with S the number of streams, C the number of channels and T the number of samples.
87
- If it is an int, the output will be [C, T].
88
- samplerate (int): if provided, will resample on the fly. If None, no resampling will
89
- be done. Original sampling rate can be obtained with :method:`samplerate`.
90
- channels (int): if 1, will convert to mono. We do not rely on ffmpeg for that
91
- as ffmpeg automatically scale by +3dB to conserve volume when playing on speakers.
92
- See https://sound.stackexchange.com/a/42710.
93
- Our definition of mono is simply the average of the two channels. Any other
94
- value will be ignored.
95
- temp_folder (str or Path or None): temporary folder to use for decoding.
96
-
97
-
98
- """
99
- streams = np.array(range(len(self)))[streams]
100
- single = not isinstance(streams, np.ndarray)
101
- if single:
102
- streams = [streams]
103
-
104
- if duration is None:
105
- target_size = None
106
- query_duration = None
107
- else:
108
- target_size = int((samplerate or self.samplerate()) * duration)
109
- query_duration = float((target_size + 1) / (samplerate or self.samplerate()))
110
-
111
- with temp_filenames(len(streams)) as filenames:
112
- command = ['ffmpeg', '-y']
113
- command += ['-loglevel', 'panic']
114
- if seek_time:
115
- command += ['-ss', str(seek_time)]
116
- command += ['-i', str(self.path)]
117
- for stream, filename in zip(streams, filenames):
118
- command += ['-map', f'0:{self._audio_streams[stream]}']
119
- if query_duration is not None:
120
- command += ['-t', str(query_duration)]
121
- command += ['-threads', '1']
122
- command += ['-f', 'f32le']
123
- if samplerate is not None:
124
- command += ['-ar', str(samplerate)]
125
- command += [filename]
126
-
127
- sp.run(command, check=True)
128
- wavs = []
129
- for filename in filenames:
130
- wav = np.fromfile(filename, dtype=np.float32)
131
- wav = torch.from_numpy(wav)
132
- wav = wav.view(-1, self.channels()).t()
133
- if channels is not None:
134
- wav = convert_audio_channels(wav, channels)
135
- if target_size is not None:
136
- wav = wav[..., :target_size]
137
- wavs.append(wav)
138
- wav = torch.stack(wavs, dim=0)
139
- if single:
140
- wav = wav[0]
141
- return wav
142
-
143
-
144
- def convert_audio_channels(wav, channels=2):
145
- """Convert audio to the given number of channels."""
146
- *shape, src_channels, length = wav.shape
147
- if src_channels == channels:
148
- pass
149
- elif channels == 1:
150
- # Case 1:
151
- # The caller asked 1-channel audio, but the stream have multiple
152
- # channels, downmix all channels.
153
- wav = wav.mean(dim=-2, keepdim=True)
154
- elif src_channels == 1:
155
- # Case 2:
156
- # The caller asked for multiple channels, but the input file have
157
- # one single channel, replicate the audio over all channels.
158
- wav = wav.expand(*shape, channels, length)
159
- elif src_channels >= channels:
160
- # Case 3:
161
- # The caller asked for multiple channels, and the input file have
162
- # more channels than requested. In that case return the first channels.
163
- wav = wav[..., :channels, :]
164
- else:
165
- # Case 4: What is a reasonable choice here?
166
- raise ValueError('The audio file has less channels than requested but is not mono.')
167
- return wav
168
-
169
-
170
- def convert_audio(wav, from_samplerate, to_samplerate, channels):
171
- wav = convert_audio_channels(wav, channels)
172
- return julius.resample_frac(wav, from_samplerate, to_samplerate)
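For reference, a minimal usage sketch of the `AudioFile` helper deleted above. It assumes `ffmpeg`/`ffprobe` are available on the PATH, that the module is importable as `demucs.audio`, and it uses a placeholder file name:

```python
from demucs.audio import AudioFile, convert_audio  # import path assumed from the repo layout

af = AudioFile("track.mp3")          # "track.mp3" is a placeholder; requires ffprobe/ffmpeg
print(af)                            # AudioFile(path=..., samplerate=..., channels=..., streams=...)

# Read the first audio stream, resampled to 44.1 kHz and downmixed to mono on the fly.
wav = af.read(streams=0, samplerate=44100, channels=1)   # tensor of shape [1, T]

# Re-expand to stereo at 48 kHz with the standalone helpers.
stereo = convert_audio(wav, from_samplerate=44100, to_samplerate=48000, channels=2)
print(wav.shape, stereo.shape)
```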
spaces/801artistry/RVC801/tools/torchgate/__init__.py DELETED
@@ -1,12 +0,0 @@
1
- """
2
- TorchGating is a PyTorch-based implementation of Spectral Gating
3
- ================================================
4
- Author: Asaf Zorea
5
-
6
- Contents
7
- --------
8
- torchgate imports all the functions from PyTorch, and in addition provides:
9
- TorchGating --- A PyTorch module that applies a spectral gate to an input signal
10
-
11
- """
12
- from .torchgate import TorchGate
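A hypothetical usage sketch for the spectral-gating module exposed above; the constructor arguments shown (`sr`, `nonstationary`) follow the upstream `torchgate` package and are assumptions here:

```python
import torch
from tools.torchgate import TorchGate  # import path assumed from this repo's layout

tg = TorchGate(sr=16000, nonstationary=True)   # argument names assumed from upstream torchgate
noisy = torch.randn(4, 16000)                  # batch of four 1-second, 16 kHz waveforms
denoised = tg(noisy)                           # spectral gate applied to each waveform
print(denoised.shape)
```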
spaces/AIFILMS/StyleGANEX/datasets/inference_dataset.py DELETED
@@ -1,22 +0,0 @@
1
- from torch.utils.data import Dataset
2
- from PIL import Image
3
- from utils import data_utils
4
-
5
-
6
- class InferenceDataset(Dataset):
7
-
8
- def __init__(self, root, opts, transform=None):
9
- self.paths = sorted(data_utils.make_dataset(root))
10
- self.transform = transform
11
- self.opts = opts
12
-
13
- def __len__(self):
14
- return len(self.paths)
15
-
16
- def __getitem__(self, index):
17
- from_path = self.paths[index]
18
- from_im = Image.open(from_path)
19
- from_im = from_im.convert('RGB') if self.opts.label_nc == 0 else from_im.convert('L')
20
- if self.transform:
21
- from_im = self.transform(from_im)
22
- return from_im
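A minimal sketch of how this dataset is typically consumed; the image folder, the `Opts` stand-in (only `label_nc` is read by the class), and the transform are placeholders:

```python
from torch.utils.data import DataLoader
from torchvision import transforms
from datasets.inference_dataset import InferenceDataset  # import path assumed

class Opts:            # stand-in for the project's option object; only label_nc is used here
    label_nc = 0       # 0 -> images are converted to RGB

transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
dataset = InferenceDataset(root="inference_images/", opts=Opts(), transform=transform)
loader = DataLoader(dataset, batch_size=4, shuffle=False)

for batch in loader:
    print(batch.shape)  # e.g. torch.Size([4, 3, 256, 256])
    break
```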
spaces/AIGC-Audio/Make_An_Audio/ldm/models/autoencoder.py DELETED
@@ -1,474 +0,0 @@
1
- import os
2
- import torch
3
- import pytorch_lightning as pl
4
- import torch.nn.functional as F
5
- from contextlib import contextmanager
6
- from packaging import version
7
- import numpy as np
8
- from ldm.modules.diffusionmodules.model import Encoder, Decoder
9
- from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
10
- from torch.optim.lr_scheduler import LambdaLR
11
- from ldm.util import instantiate_from_config
12
- # from icecream import ic
13
-
14
- class VQModel(pl.LightningModule):
15
- def __init__(self,
16
- ddconfig,
17
- lossconfig,
18
- n_embed,
19
- embed_dim,
20
- ckpt_path=None,
21
- ignore_keys=[],
22
- image_key="image",
23
- colorize_nlabels=None,
24
- monitor=None,
25
- batch_resize_range=None,
26
- scheduler_config=None,
27
- lr_g_factor=1.0,
28
- remap=None,
29
- sane_index_shape=False, # tell vector quantizer to return indices as bhw
30
- use_ema=False
31
- ):
32
- super().__init__()
33
- self.embed_dim = embed_dim
34
- self.n_embed = n_embed
35
- self.image_key = image_key
36
- self.encoder = Encoder(**ddconfig)
37
- self.decoder = Decoder(**ddconfig)
38
- self.loss = instantiate_from_config(lossconfig)
39
- self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
40
- remap=remap,
41
- sane_index_shape=sane_index_shape)
42
- self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
43
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
44
- if colorize_nlabels is not None:
45
- assert type(colorize_nlabels)==int
46
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
47
- if monitor is not None:
48
- self.monitor = monitor
49
- self.batch_resize_range = batch_resize_range
50
- if self.batch_resize_range is not None:
51
- print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
52
-
53
- self.use_ema = use_ema
54
- if self.use_ema:
55
- self.model_ema = LitEma(self)
56
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
57
-
58
- if ckpt_path is not None:
59
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
60
- self.scheduler_config = scheduler_config
61
- self.lr_g_factor = lr_g_factor
62
-
63
- @contextmanager
64
- def ema_scope(self, context=None):
65
- if self.use_ema:
66
- self.model_ema.store(self.parameters())
67
- self.model_ema.copy_to(self)
68
- if context is not None:
69
- print(f"{context}: Switched to EMA weights")
70
- try:
71
- yield None
72
- finally:
73
- if self.use_ema:
74
- self.model_ema.restore(self.parameters())
75
- if context is not None:
76
- print(f"{context}: Restored training weights")
77
-
78
- def init_from_ckpt(self, path, ignore_keys=list()):
79
- sd = torch.load(path, map_location="cpu")["state_dict"]
80
- keys = list(sd.keys())
81
- for k in keys:
82
- for ik in ignore_keys:
83
- if k.startswith(ik):
84
- print("Deleting key {} from state_dict.".format(k))
85
- del sd[k]
86
- missing, unexpected = self.load_state_dict(sd, strict=False)
87
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
88
- if len(missing) > 0:
89
- print(f"Missing Keys: {missing}")
90
- print(f"Unexpected Keys: {unexpected}")
91
-
92
- def on_train_batch_end(self, *args, **kwargs):
93
- if self.use_ema:
94
- self.model_ema(self)
95
-
96
- def encode(self, x):
97
- h = self.encoder(x)
98
- h = self.quant_conv(h)
99
- quant, emb_loss, info = self.quantize(h)
100
- return quant, emb_loss, info
101
-
102
- def encode_to_prequant(self, x):
103
- h = self.encoder(x)
104
- h = self.quant_conv(h)
105
- return h
106
-
107
- def decode(self, quant):
108
- quant = self.post_quant_conv(quant)
109
- dec = self.decoder(quant)
110
- return dec
111
-
112
- def decode_code(self, code_b):
113
- quant_b = self.quantize.embed_code(code_b)
114
- dec = self.decode(quant_b)
115
- return dec
116
-
117
- def forward(self, input, return_pred_indices=False):
118
- quant, diff, (_,_,ind) = self.encode(input)
119
- dec = self.decode(quant)
120
- if return_pred_indices:
121
- return dec, diff, ind
122
- return dec, diff
123
-
124
- def get_input(self, batch, k):
125
- x = batch[k]
126
- if len(x.shape) == 3:
127
- x = x[..., None]
128
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
129
- if self.batch_resize_range is not None:
130
- lower_size = self.batch_resize_range[0]
131
- upper_size = self.batch_resize_range[1]
132
- if self.global_step <= 4:
133
- # do the first few batches with max size to avoid later oom
134
- new_resize = upper_size
135
- else:
136
- new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
137
- if new_resize != x.shape[2]:
138
- x = F.interpolate(x, size=new_resize, mode="bicubic")
139
- x = x.detach()
140
- return x
141
-
142
- def training_step(self, batch, batch_idx, optimizer_idx):
143
- # https://github.com/pytorch/pytorch/issues/37142
144
- # try not to fool the heuristics
145
- x = self.get_input(batch, self.image_key)
146
- xrec, qloss, ind = self(x, return_pred_indices=True)
147
-
148
- if optimizer_idx == 0:
149
- # autoencode
150
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
151
- last_layer=self.get_last_layer(), split="train",
152
- predicted_indices=ind)
153
-
154
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
155
- return aeloss
156
-
157
- if optimizer_idx == 1:
158
- # discriminator
159
- discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
160
- last_layer=self.get_last_layer(), split="train")
161
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
162
- return discloss
163
-
164
- def validation_step(self, batch, batch_idx):
165
- log_dict = self._validation_step(batch, batch_idx)
166
- with self.ema_scope():
167
- log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
168
- return log_dict
169
-
170
- def _validation_step(self, batch, batch_idx, suffix=""):
171
- x = self.get_input(batch, self.image_key)
172
- xrec, qloss, ind = self(x, return_pred_indices=True)
173
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
174
- self.global_step,
175
- last_layer=self.get_last_layer(),
176
- split="val"+suffix,
177
- predicted_indices=ind
178
- )
179
-
180
- discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
181
- self.global_step,
182
- last_layer=self.get_last_layer(),
183
- split="val"+suffix,
184
- predicted_indices=ind
185
- )
186
- rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
187
- self.log(f"val{suffix}/rec_loss", rec_loss,
188
- prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
189
- self.log(f"val{suffix}/aeloss", aeloss,
190
- prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
191
- if version.parse(pl.__version__) >= version.parse('1.4.0'):
192
- del log_dict_ae[f"val{suffix}/rec_loss"]
193
- self.log_dict(log_dict_ae)
194
- self.log_dict(log_dict_disc)
195
- return self.log_dict
196
-
197
- def test_step(self, batch, batch_idx):
198
- x = self.get_input(batch, self.image_key)
199
- xrec, qloss, ind = self(x, return_pred_indices=True)
200
- reconstructions = (xrec + 1)/2 # to mel scale
201
- test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
202
- savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
203
- if not os.path.exists(savedir):
204
- os.makedirs(savedir)
205
-
206
- file_names = batch['f_name']
207
- # print(f"reconstructions.shape:{reconstructions.shape}",file_names)
208
- reconstructions = reconstructions.cpu().numpy().squeeze(1) # squuze channel dim
209
- for b in range(reconstructions.shape[0]):
210
- vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
211
- v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
212
- save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}.npy')
213
- np.save(save_img_path,reconstructions[b])
214
-
215
- return None
216
-
217
- def configure_optimizers(self):
218
- lr_d = self.learning_rate
219
- lr_g = self.lr_g_factor*self.learning_rate
220
- print("lr_d", lr_d)
221
- print("lr_g", lr_g)
222
- opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
223
- list(self.decoder.parameters())+
224
- list(self.quantize.parameters())+
225
- list(self.quant_conv.parameters())+
226
- list(self.post_quant_conv.parameters()),
227
- lr=lr_g, betas=(0.5, 0.9))
228
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
229
- lr=lr_d, betas=(0.5, 0.9))
230
-
231
- if self.scheduler_config is not None:
232
- scheduler = instantiate_from_config(self.scheduler_config)
233
-
234
- print("Setting up LambdaLR scheduler...")
235
- scheduler = [
236
- {
237
- 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
238
- 'interval': 'step',
239
- 'frequency': 1
240
- },
241
- {
242
- 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
243
- 'interval': 'step',
244
- 'frequency': 1
245
- },
246
- ]
247
- return [opt_ae, opt_disc], scheduler
248
- return [opt_ae, opt_disc], []
249
-
250
- def get_last_layer(self):
251
- return self.decoder.conv_out.weight
252
-
253
- def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
254
- log = dict()
255
- x = self.get_input(batch, self.image_key)
256
- x = x.to(self.device)
257
- if only_inputs:
258
- log["inputs"] = x
259
- return log
260
- xrec, _ = self(x)
261
- if x.shape[1] > 3:
262
- # colorize with random projection
263
- assert xrec.shape[1] > 3
264
- x = self.to_rgb(x)
265
- xrec = self.to_rgb(xrec)
266
- log["inputs"] = x
267
- log["reconstructions"] = xrec
268
- if plot_ema:
269
- with self.ema_scope():
270
- xrec_ema, _ = self(x)
271
- if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
272
- log["reconstructions_ema"] = xrec_ema
273
- return log
274
-
275
- def to_rgb(self, x):
276
- assert self.image_key == "segmentation"
277
- if not hasattr(self, "colorize"):
278
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
279
- x = F.conv2d(x, weight=self.colorize)
280
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
281
- return x
282
-
283
-
284
- class VQModelInterface(VQModel):
285
- def __init__(self, embed_dim, *args, **kwargs):
286
- super().__init__(embed_dim=embed_dim, *args, **kwargs)
287
- self.embed_dim = embed_dim
288
-
289
- def encode(self, x):  # in VQModel, quantization happens inside encode(); in VQModelInterface it is deferred to decode()
290
- h = self.encoder(x)
291
- h = self.quant_conv(h)
292
- return h
293
-
294
- def decode(self, h, force_not_quantize=False):
295
- # also go through quantization layer
296
- if not force_not_quantize:
297
- quant, emb_loss, info = self.quantize(h)
298
- else:
299
- quant = h
300
- quant = self.post_quant_conv(quant)
301
- dec = self.decoder(quant)
302
- return dec
303
-
304
-
305
- class AutoencoderKL(pl.LightningModule):
306
- def __init__(self,
307
- ddconfig,
308
- lossconfig,
309
- embed_dim,
310
- ckpt_path=None,
311
- ignore_keys=[],
312
- image_key="image",
313
- colorize_nlabels=None,
314
- monitor=None,
315
- ):
316
- super().__init__()
317
- self.image_key = image_key
318
- self.encoder = Encoder(**ddconfig)
319
- self.decoder = Decoder(**ddconfig)
320
- self.loss = instantiate_from_config(lossconfig)
321
- assert ddconfig["double_z"]
322
- self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
323
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
324
- self.embed_dim = embed_dim
325
- if colorize_nlabels is not None:
326
- assert type(colorize_nlabels)==int
327
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
328
- if monitor is not None:
329
- self.monitor = monitor
330
- if ckpt_path is not None:
331
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
332
- # self.automatic_optimization = False # hjw for debug
333
-
334
- def init_from_ckpt(self, path, ignore_keys=list()):
335
- sd = torch.load(path, map_location="cpu")["state_dict"]
336
- keys = list(sd.keys())
337
- for k in keys:
338
- for ik in ignore_keys:
339
- if k.startswith(ik):
340
- print("Deleting key {} from state_dict.".format(k))
341
- del sd[k]
342
- self.load_state_dict(sd, strict=False)
343
- print(f"Restored from {path}")
344
-
345
- def encode(self, x):
346
- h = self.encoder(x)
347
- moments = self.quant_conv(h)
348
- posterior = DiagonalGaussianDistribution(moments)
349
- return posterior
350
-
351
- def decode(self, z):
352
- z = self.post_quant_conv(z)
353
- dec = self.decoder(z)
354
- return dec
355
-
356
- def forward(self, input, sample_posterior=True):
357
- posterior = self.encode(input)
358
- if sample_posterior:
359
- z = posterior.sample()
360
- else:
361
- z = posterior.mode()
362
- dec = self.decode(z)
363
- return dec, posterior
364
-
365
- def get_input(self, batch, k):
366
- x = batch[k]
367
- if len(x.shape) == 3:
368
- x = x[..., None]
369
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
370
- return x
371
-
372
- def training_step(self, batch, batch_idx, optimizer_idx):
373
- inputs = self.get_input(batch, self.image_key)
374
- reconstructions, posterior = self(inputs)
375
-
376
- if optimizer_idx == 0:
377
- # train encoder+decoder+logvar
378
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
379
- last_layer=self.get_last_layer(), split="train")
380
- self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
381
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
382
- return aeloss
383
-
384
- if optimizer_idx == 1:
385
- # train the discriminator
386
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
387
- last_layer=self.get_last_layer(), split="train")
388
-
389
- self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
390
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
391
- return discloss
392
-
393
- def validation_step(self, batch, batch_idx):
394
- # self.log_images(batch,only_inputs=False,save_dir='mel_result_ae13_26/fake_class')
395
- return self.log_dict
396
-
397
- def test_step(self, batch, batch_idx):
398
- test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
399
- savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
400
- os.makedirs(savedir,exist_ok=True)
401
- inputs = self.get_input(batch, self.image_key)# inputs shape:(b,c,mel_len,T) or (b,c,h,w)
402
- # ic(inputs.shape)
403
- # inputs = inputs[...,:624]
404
- # ic(inputs.shape)
405
- xrec, posterior = self(inputs)# reconstructions:(b,c,mel_len,T) or (b,c,h,w)
406
- file_names = batch['f_name']
407
- # print(f"reconstructions.shape:{reconstructions.shape}",file_names)
408
- for b in range(len(file_names)):
409
- rcon = (xrec[b].squeeze().detach().cpu().numpy() + 1) / 2 # to mel scale,squeeze channel dim
410
- vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
411
- v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
412
- save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}.npy')
413
- np.save(save_img_path,rcon)
414
-
415
- return None
416
-
417
- def configure_optimizers(self):
418
- lr = self.learning_rate
419
- opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
420
- list(self.decoder.parameters())+
421
- list(self.quant_conv.parameters())+
422
- list(self.post_quant_conv.parameters()),
423
- lr=lr, betas=(0.5, 0.9))
424
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
425
- lr=lr, betas=(0.5, 0.9))
426
- return [opt_ae, opt_disc], []
427
-
428
- def get_last_layer(self):
429
- return self.decoder.conv_out.weight
430
-
431
- @torch.no_grad()
432
- def log_images(self, batch, only_inputs=False,save_dir = 'mel_result_ae13_26_debug/fake_class', **kwargs): # called from on_validation_batch_end in main.py
433
- log = dict()
434
- x = self.get_input(batch, self.image_key)
435
- x = x.to(self.device)
436
- if not only_inputs:
437
- xrec, posterior = self(x)
438
- if x.shape[1] > 3:
439
- # colorize with random projection
440
- assert xrec.shape[1] > 3
441
- x = self.to_rgb(x)
442
- xrec = self.to_rgb(xrec)
443
- log["samples"] = self.decode(torch.randn_like(posterior.sample()))
444
- log["reconstructions"] = xrec
445
- log["inputs"] = x
446
- return log
447
-
448
- def to_rgb(self, x):
449
- assert self.image_key == "segmentation"
450
- if not hasattr(self, "colorize"):
451
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
452
- x = F.conv2d(x, weight=self.colorize)
453
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
454
- return x
455
-
456
-
457
- class IdentityFirstStage(torch.nn.Module):
458
- def __init__(self, *args, vq_interface=False, **kwargs):
459
- self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff
460
- super().__init__()
461
-
462
- def encode(self, x, *args, **kwargs):
463
- return x
464
-
465
- def decode(self, x, *args, **kwargs):
466
- return x
467
-
468
- def quantize(self, x, *args, **kwargs):
469
- if self.vq_interface:
470
- return x, None, [None, None, None]
471
- return x
472
-
473
- def forward(self, x, *args, **kwargs):
474
- return x
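A sketch of a round trip through `AutoencoderKL`; the config path and the `model` key mirror the usual latent-diffusion config layout and are assumptions:

```python
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config  # same helper the module above imports

config = OmegaConf.load("configs/autoencoder_kl.yaml")   # placeholder path
autoencoder = instantiate_from_config(config.model).eval()

x = torch.randn(1, 3, 256, 256)              # fake image batch scaled to [-1, 1]
with torch.no_grad():
    posterior = autoencoder.encode(x)        # DiagonalGaussianDistribution
    z = posterior.sample()                   # latent; e.g. [1, 4, 32, 32] for an f=8, embed_dim=4 config
    recon = autoencoder.decode(z)
print(z.shape, recon.shape)
```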
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/CLAP/clap.py DELETED
@@ -1,89 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn.functional as F
4
- from torch import nn
5
- from transformers import AutoModel
6
- from .audio import get_audio_encoder
7
-
8
- class Projection(nn.Module):
9
- def __init__(self, d_in: int, d_out: int, p: float=0.5) -> None:
10
- super().__init__()
11
- self.linear1 = nn.Linear(d_in, d_out, bias=False)
12
- self.linear2 = nn.Linear(d_out, d_out, bias=False)
13
- self.layer_norm = nn.LayerNorm(d_out)
14
- self.drop = nn.Dropout(p)
15
-
16
- def forward(self, x: torch.Tensor) -> torch.Tensor:
17
- embed1 = self.linear1(x)
18
- embed2 = self.drop(self.linear2(F.gelu(embed1)))
19
- embeds = self.layer_norm(embed1 + embed2)
20
- return embeds
21
-
22
- class AudioEncoder(nn.Module):
23
- def __init__(self, audioenc_name:str, d_in: int, d_out: int, sample_rate: int, window_size: int,
24
- hop_size: int, mel_bins: int, fmin: int, fmax: int, classes_num: int) -> None:
25
- super().__init__()
26
-
27
- audio_encoder = get_audio_encoder(audioenc_name)
28
-
29
- self.base = audio_encoder(
30
- sample_rate, window_size,
31
- hop_size, mel_bins, fmin, fmax,
32
- classes_num, d_in)
33
-
34
- self.projection = Projection(d_in, d_out)
35
-
36
- def forward(self, x):
37
- out_dict = self.base(x)
38
- audio_features, audio_classification_output = out_dict['embedding'], out_dict['clipwise_output']
39
- projected_vec = self.projection(audio_features)
40
- return projected_vec, audio_classification_output
41
-
42
- class TextEncoder(nn.Module):
43
- def __init__(self, d_out: int, text_model: str, transformer_embed_dim: int) -> None:
44
- super().__init__()
45
- self.base = AutoModel.from_pretrained(text_model)
46
- self.projection = Projection(transformer_embed_dim, d_out)
47
-
48
- def forward(self, x):
49
- out = self.base(**x)[0]
50
- out = out[:, 0, :] # get CLS token output
51
- projected_vec = self.projection(out)
52
- return projected_vec
53
-
54
- class CLAP(nn.Module):
55
- def __init__(self,
56
- # audio
57
- audioenc_name: str,
58
- sample_rate: int,
59
- window_size: int,
60
- hop_size: int,
61
- mel_bins: int,
62
- fmin: int,
63
- fmax: int,
64
- classes_num: int,
65
- out_emb: int,
66
- # text
67
- text_model: str,
68
- transformer_embed_dim: int,
69
- # common
70
- d_proj: int,
71
- ):
72
- super().__init__()
73
-
74
-
75
- self.audio_encoder = AudioEncoder(
76
- audioenc_name, out_emb, d_proj,
77
- sample_rate, window_size, hop_size, mel_bins, fmin, fmax, classes_num)
78
-
79
- self.caption_encoder = TextEncoder(
80
- d_proj, text_model, transformer_embed_dim
81
- )
82
-
83
- self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
84
-
85
- def forward(self, audio, text):
86
- audio_embed, _ = self.audio_encoder(audio)
87
- caption_embed = self.caption_encoder(text)
88
-
89
- return caption_embed, audio_embed, self.logit_scale.exp()
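A hypothetical instantiation of the `CLAP` wrapper above. The audio-encoder name, mel settings, and embedding sizes mirror a common Cnn14 + BERT configuration and are assumptions; only the argument names come from the constructor:

```python
import torch
from transformers import AutoTokenizer
from ldm.modules.encoders.CLAP.clap import CLAP  # import path assumed from the repo layout

model = CLAP(
    audioenc_name="Cnn14", sample_rate=44100, window_size=1024, hop_size=320,
    mel_bins=64, fmin=50, fmax=14000, classes_num=527, out_emb=2048,   # values are assumptions
    text_model="bert-base-uncased", transformer_embed_dim=768, d_proj=1024,
).eval()

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
text = tokenizer(["a dog barking", "rain on a window"], padding=True, return_tensors="pt")
audio = torch.randn(2, 5 * 44100)   # two 5-second waveforms

with torch.no_grad():
    caption_embed, audio_embed, logit_scale = model(audio, text)
# Note: the module does not L2-normalize the embeddings itself.
scores = logit_scale * caption_embed @ audio_embed.t()
print(scores.shape)   # torch.Size([2, 2])
```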
spaces/AIWaves/Debate/src/agents/evolve.py DELETED
@@ -1,17 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The AIWaves Inc. team.
3
-
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
-
17
- """self evolution of an LLM autonoumous agent"""
spaces/Abhilashvj/planogram-compliance/utils.py DELETED
@@ -1,61 +0,0 @@
1
- import json
2
- import os
3
-
4
- # from sklearn.externals import joblib
5
- import joblib
6
- import numpy as np
7
- import pandas as pd
8
-
9
- # from .variables import old_ocr_req_cols
10
- # from .skew_correction import PageSkewWraper
11
-
12
- const_HW = 1.294117647
13
- const_W = 600
14
-
15
-
16
- def bucket_sort(df, colmn, ymax_col="ymax", ymin_col="ymin"):
17
- df["line_number"] = 0
18
- colmn.append("line_number")
19
- array_value = df[colmn].values
20
- start_index = Line_counter = counter = 0
21
- ymax, ymin, line_no = (
22
- colmn.index(ymax_col),
23
- colmn.index(ymin_col),
24
- colmn.index("line_number"),
25
- )
26
- while counter < len(array_value):
27
- current_ymax = array_value[start_index][ymax]
28
- for next_index in range(start_index, len(array_value)):
29
- counter += 1
30
-
31
- next_ymin = array_value[next_index][ymin]
32
- next_ymax = array_value[next_index][ymax]
33
- if current_ymax > next_ymin:
34
-
35
- array_value[next_index][line_no] = Line_counter + 1
36
- # if current_ymax < next_ymax:
37
-
38
- # current_ymax = next_ymax
39
- else:
40
- counter -= 1
41
- break
42
- # print(counter, len(array_value), start_index)
43
- start_index = counter
44
- Line_counter += 1
45
- return pd.DataFrame(array_value, columns=colmn)
46
-
47
-
48
- def do_sorting(df):
49
- df.sort_values(["ymin", "xmin"], ascending=True, inplace=True)
50
- df["idx"] = df.index
51
- if "line_number" in df.columns:
52
- print("line number removed")
53
- df.drop("line_number", axis=1, inplace=True)
54
- req_colns = ["xmin", "ymin", "xmax", "ymax", "idx"]
55
- temp_df = df.copy()
56
- temp = bucket_sort(temp_df.copy(), req_colns)
57
- df = df.merge(temp[["idx", "line_number"]], on="idx")
58
- df.sort_values(["line_number", "xmin"], ascending=True, inplace=True)
59
- df = df.reset_index(drop=True)
60
- df = df.reset_index(drop=True)
61
- return df
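A small worked example of `do_sorting`: OCR boxes on the same visual row receive the same `line_number` and are ordered left-to-right. Importing as `from utils import do_sorting` assumes the module path above:

```python
import pandas as pd
from utils import do_sorting  # module path assumed

# Two boxes on a top row and two on a lower row, given out of reading order.
boxes = pd.DataFrame({
    "xmin": [120, 10, 130, 15],
    "ymin": [7, 5, 62, 60],
    "xmax": [220, 100, 230, 110],
    "ymax": [27, 25, 82, 80],
    "text": ["world", "Hello", "line", "second"],
})
ordered = do_sorting(boxes)
print(ordered[["text", "line_number"]])
# "Hello"/"world" end up on line 1, "second"/"line" on line 2, each sorted by xmin.
```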
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/hteyun.py DELETED
@@ -1,34 +0,0 @@
1
- import requests
2
- import os
3
- import json
4
- from ...typing import sha256, Dict, get_type_hints
5
-
6
- url = 'https://hteyun.com'
7
- model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
8
- supports_stream = True
9
- needs_auth = False
10
-
11
- def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
12
- headers = {
13
- 'Content-Type': 'application/json',
14
- 'Accept': 'application/json, text/plain, */*',
15
- 'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4',
16
- 'Origin': 'https://hteyun.com',
17
- 'Referer': 'https://hteyun.com/chat/',
18
- }
19
- data = {
20
- 'messages': messages,
21
- 'model': model,
22
- 'systemMessage': 'You are ChatGPT, a large language model trained by OpenAI. Follow the user\'s instructions carefully. Respond using russian language.',
23
- 'temperature': 0.7,
24
- 'presence_penalty': 0,
25
- }
26
- response = requests.post(url + '/api/chat-stream', json=data, headers=headers, stream=True)
27
- print(response.json())
28
-
29
- # Extract the text from the response
30
- return response.json()['text']
31
-
32
-
33
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
34
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
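For context, a sketch of how a g4f provider of this shape is invoked; the hteyun.com endpoint is defunct, so this is illustrative only. Note that although `supports_stream` is set to `True`, the function above returns the full `text` field of the JSON body rather than yielding chunks.

```python
from g4f.Provider.Providers.hteyun import _create_completion  # import path assumed

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello in one sentence."},
]
# Hypothetical call; the remote API no longer exists.
reply = _create_completion(model="gpt-3.5-turbo", messages=messages, stream=False)
print(reply)
```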
spaces/Aer0xander/sd-to-diffusers/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: SD To Diffusers
3
- emoji: 🎨➡️🧨
4
- colorFrom: indigo
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.9.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- duplicated_from: diffusers/sd-to-diffusers
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
1
- import HolyGrail from './HolyGrail';
2
-
3
- export default function (
4
- config?: HolyGrail.IConfig
5
- ): HolyGrail;
spaces/AlgoveraAI/dcgan-crypto-punks/README.md DELETED
@@ -1,38 +0,0 @@
1
- ---
2
- title: Dcgan Crypto Punks
3
- emoji: 📚
4
- colorFrom: purple
5
- colorTo: blue
6
- sdk: gradio
7
- # sdk_version: 3.3
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- # Configuration
13
-
14
- `title`: _string_
15
- Display title for the Space
16
-
17
- `emoji`: _string_
18
- Space emoji (emoji-only character allowed)
19
-
20
- `colorFrom`: _string_
21
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
22
-
23
- `colorTo`: _string_
24
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
25
-
26
- `sdk`: _string_
27
- Can be either `gradio` or `streamlit`
28
-
29
- `sdk_version` : _string_
30
- Only applicable for `streamlit` SDK.
31
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
32
-
33
- `app_file`: _string_
34
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
35
- Path is relative to the root of the repository.
36
-
37
- `pinned`: _boolean_
38
- Whether the Space stays on top of your list.
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/base.py DELETED
@@ -1,56 +0,0 @@
1
- from easydict import EasyDict as edict
2
-
3
- # make training faster
4
- # our RAM is 256G
5
- # mount -t tmpfs -o size=140G tmpfs /train_tmp
6
-
7
- config = edict()
8
- config.loss = "arcface"
9
- config.network = "r50"
10
- config.resume = False
11
- config.output = "ms1mv3_arcface_r50"
12
-
13
- config.dataset = "ms1m-retinaface-t1"
14
- config.embedding_size = 512
15
- config.sample_rate = 1
16
- config.fp16 = False
17
- config.momentum = 0.9
18
- config.weight_decay = 5e-4
19
- config.batch_size = 128
20
- config.lr = 0.1 # batch size is 512
21
-
22
- if config.dataset == "emore":
23
- config.rec = "/train_tmp/faces_emore"
24
- config.num_classes = 85742
25
- config.num_image = 5822653
26
- config.num_epoch = 16
27
- config.warmup_epoch = -1
28
- config.decay_epoch = [8, 14, ]
29
- config.val_targets = ["lfw", ]
30
-
31
- elif config.dataset == "ms1m-retinaface-t1":
32
- config.rec = "/train_tmp/ms1m-retinaface-t1"
33
- config.num_classes = 93431
34
- config.num_image = 5179510
35
- config.num_epoch = 25
36
- config.warmup_epoch = -1
37
- config.decay_epoch = [11, 17, 22]
38
- config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
39
-
40
- elif config.dataset == "glint360k":
41
- config.rec = "/train_tmp/glint360k"
42
- config.num_classes = 360232
43
- config.num_image = 17091657
44
- config.num_epoch = 20
45
- config.warmup_epoch = -1
46
- config.decay_epoch = [8, 12, 15, 18]
47
- config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
48
-
49
- elif config.dataset == "webface":
50
- config.rec = "/train_tmp/faces_webface_112x112"
51
- config.num_classes = 10572
52
- config.num_image = "forget"
53
- config.num_epoch = 34
54
- config.warmup_epoch = -1
55
- config.decay_epoch = [20, 28, 32]
56
- config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
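A brief sketch of how this `easydict`-style config is typically consumed and overridden by a training script; the import path is assumed from the file location above:

```python
from configs.base import config  # import path assumed

print(config.network, config.dataset)    # "r50", "ms1m-retinaface-t1"
print(config.num_classes, config.lr)     # 93431, 0.1

# Attribute-style overrides before training, e.g. for a quick smoke test:
config.batch_size = 32
config.num_epoch = 1
```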
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/eval_ijbc.py DELETED
@@ -1,483 +0,0 @@
1
- # coding: utf-8
2
-
3
- import os
4
- import pickle
5
-
6
- import matplotlib
7
- import pandas as pd
8
-
9
- matplotlib.use('Agg')
10
- import matplotlib.pyplot as plt
11
- import timeit
12
- import sklearn
13
- import argparse
14
- import cv2
15
- import numpy as np
16
- import torch
17
- from skimage import transform as trans
18
- from backbones import get_model
19
- from sklearn.metrics import roc_curve, auc
20
-
21
- from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
22
- from prettytable import PrettyTable
23
- from pathlib import Path
24
-
25
- import sys
26
- import warnings
27
-
28
- sys.path.insert(0, "../")
29
- warnings.filterwarnings("ignore")
30
-
31
- parser = argparse.ArgumentParser(description='do ijb test')
32
- # general
33
- parser.add_argument('--model-prefix', default='', help='path to load model.')
34
- parser.add_argument('--image-path', default='', type=str, help='')
35
- parser.add_argument('--result-dir', default='.', type=str, help='')
36
- parser.add_argument('--batch-size', default=128, type=int, help='')
37
- parser.add_argument('--network', default='iresnet50', type=str, help='')
38
- parser.add_argument('--job', default='insightface', type=str, help='job name')
39
- parser.add_argument('--target', default='IJBC', type=str, help='target, set to IJBC or IJBB')
40
- args = parser.parse_args()
41
-
42
- target = args.target
43
- model_path = args.model_prefix
44
- image_path = args.image_path
45
- result_dir = args.result_dir
46
- gpu_id = None
47
- use_norm_score = True # if True, TestMode(N1)
48
- use_detector_score = True # if True, TestMode(D1)
49
- use_flip_test = True # if True, TestMode(F1)
50
- job = args.job
51
- batch_size = args.batch_size
52
-
53
-
54
- class Embedding(object):
55
- def __init__(self, prefix, data_shape, batch_size=1):
56
- image_size = (112, 112)
57
- self.image_size = image_size
58
- weight = torch.load(prefix)
59
- resnet = get_model(args.network, dropout=0, fp16=False).cuda()
60
- resnet.load_state_dict(weight)
61
- model = torch.nn.DataParallel(resnet)
62
- self.model = model
63
- self.model.eval()
64
- src = np.array([
65
- [30.2946, 51.6963],
66
- [65.5318, 51.5014],
67
- [48.0252, 71.7366],
68
- [33.5493, 92.3655],
69
- [62.7299, 92.2041]], dtype=np.float32)
70
- src[:, 0] += 8.0
71
- self.src = src
72
- self.batch_size = batch_size
73
- self.data_shape = data_shape
74
-
75
- def get(self, rimg, landmark):
76
-
77
- assert landmark.shape[0] == 68 or landmark.shape[0] == 5
78
- assert landmark.shape[1] == 2
79
- if landmark.shape[0] == 68:
80
- landmark5 = np.zeros((5, 2), dtype=np.float32)
81
- landmark5[0] = (landmark[36] + landmark[39]) / 2
82
- landmark5[1] = (landmark[42] + landmark[45]) / 2
83
- landmark5[2] = landmark[30]
84
- landmark5[3] = landmark[48]
85
- landmark5[4] = landmark[54]
86
- else:
87
- landmark5 = landmark
88
- tform = trans.SimilarityTransform()
89
- tform.estimate(landmark5, self.src)
90
- M = tform.params[0:2, :]
91
- img = cv2.warpAffine(rimg,
92
- M, (self.image_size[1], self.image_size[0]),
93
- borderValue=0.0)
94
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
95
- img_flip = np.fliplr(img)
96
- img = np.transpose(img, (2, 0, 1)) # 3*112*112, RGB
97
- img_flip = np.transpose(img_flip, (2, 0, 1))
98
- input_blob = np.zeros((2, 3, self.image_size[1], self.image_size[0]), dtype=np.uint8)
99
- input_blob[0] = img
100
- input_blob[1] = img_flip
101
- return input_blob
102
-
103
- @torch.no_grad()
104
- def forward_db(self, batch_data):
105
- imgs = torch.Tensor(batch_data).cuda()
106
- imgs.div_(255).sub_(0.5).div_(0.5)
107
- feat = self.model(imgs)
108
- feat = feat.reshape([self.batch_size, 2 * feat.shape[1]])
109
- return feat.cpu().numpy()
110
-
111
-
112
- # Split a list as evenly as possible into n sublists (len(result) == n); if n exceeds the number of elements, the extra sublists are empty.
113
- def divideIntoNstrand(listTemp, n):
114
- twoList = [[] for i in range(n)]
115
- for i, e in enumerate(listTemp):
116
- twoList[i % n].append(e)
117
- return twoList
118
-
119
-
120
- def read_template_media_list(path):
121
- # ijb_meta = np.loadtxt(path, dtype=str)
122
- ijb_meta = pd.read_csv(path, sep=' ', header=None).values
123
- templates = ijb_meta[:, 1].astype(int)
124
- medias = ijb_meta[:, 2].astype(int)
125
- return templates, medias
126
-
127
-
128
- # In[ ]:
129
-
130
-
131
- def read_template_pair_list(path):
132
- # pairs = np.loadtxt(path, dtype=str)
133
- pairs = pd.read_csv(path, sep=' ', header=None).values
134
- # print(pairs.shape)
135
- # print(pairs[:, 0].astype(np.int))
136
- t1 = pairs[:, 0].astype(int)
137
- t2 = pairs[:, 1].astype(int)
138
- label = pairs[:, 2].astype(int)
139
- return t1, t2, label
140
-
141
-
142
- # In[ ]:
143
-
144
-
145
- def read_image_feature(path):
146
- with open(path, 'rb') as fid:
147
- img_feats = pickle.load(fid)
148
- return img_feats
149
-
150
-
151
- # In[ ]:
152
-
153
-
154
- def get_image_feature(img_path, files_list, model_path, epoch, gpu_id):
155
- batch_size = args.batch_size
156
- data_shape = (3, 112, 112)
157
-
158
- files = files_list
159
- print('files:', len(files))
160
- rare_size = len(files) % batch_size
161
- faceness_scores = []
162
- batch = 0
163
- img_feats = np.empty((len(files), 1024), dtype=np.float32)
164
-
165
- batch_data = np.empty((2 * batch_size, 3, 112, 112))
166
- embedding = Embedding(model_path, data_shape, batch_size)
167
- for img_index, each_line in enumerate(files[:len(files) - rare_size]):
168
- name_lmk_score = each_line.strip().split(' ')
169
- img_name = os.path.join(img_path, name_lmk_score[0])
170
- img = cv2.imread(img_name)
171
- lmk = np.array([float(x) for x in name_lmk_score[1:-1]],
172
- dtype=np.float32)
173
- lmk = lmk.reshape((5, 2))
174
- input_blob = embedding.get(img, lmk)
175
-
176
- batch_data[2 * (img_index - batch * batch_size)][:] = input_blob[0]
177
- batch_data[2 * (img_index - batch * batch_size) + 1][:] = input_blob[1]
178
- if (img_index + 1) % batch_size == 0:
179
- print('batch', batch)
180
- img_feats[batch * batch_size:batch * batch_size +
181
- batch_size][:] = embedding.forward_db(batch_data)
182
- batch += 1
183
- faceness_scores.append(name_lmk_score[-1])
184
-
185
- batch_data = np.empty((2 * rare_size, 3, 112, 112))
186
- embedding = Embedding(model_path, data_shape, rare_size)
187
- for img_index, each_line in enumerate(files[len(files) - rare_size:]):
188
- name_lmk_score = each_line.strip().split(' ')
189
- img_name = os.path.join(img_path, name_lmk_score[0])
190
- img = cv2.imread(img_name)
191
- lmk = np.array([float(x) for x in name_lmk_score[1:-1]],
192
- dtype=np.float32)
193
- lmk = lmk.reshape((5, 2))
194
- input_blob = embedding.get(img, lmk)
195
- batch_data[2 * img_index][:] = input_blob[0]
196
- batch_data[2 * img_index + 1][:] = input_blob[1]
197
- if (img_index + 1) % rare_size == 0:
198
- print('batch', batch)
199
- img_feats[len(files) -
200
- rare_size:][:] = embedding.forward_db(batch_data)
201
- batch += 1
202
- faceness_scores.append(name_lmk_score[-1])
203
- faceness_scores = np.array(faceness_scores).astype(np.float32)
204
- # img_feats = np.ones( (len(files), 1024), dtype=np.float32) * 0.01
205
- # faceness_scores = np.ones( (len(files), ), dtype=np.float32 )
206
- return img_feats, faceness_scores
207
-
208
-
209
- # In[ ]:
210
-
211
-
212
- def image2template_feature(img_feats=None, templates=None, medias=None):
213
- # ==========================================================
214
- # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim]
215
- # 2. compute media feature.
216
- # 3. compute template feature.
217
- # ==========================================================
218
- unique_templates = np.unique(templates)
219
- template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))
220
-
221
- for count_template, uqt in enumerate(unique_templates):
222
-
223
- (ind_t,) = np.where(templates == uqt)
224
- face_norm_feats = img_feats[ind_t]
225
- face_medias = medias[ind_t]
226
- unique_medias, unique_media_counts = np.unique(face_medias,
227
- return_counts=True)
228
- media_norm_feats = []
229
- for u, ct in zip(unique_medias, unique_media_counts):
230
- (ind_m,) = np.where(face_medias == u)
231
- if ct == 1:
232
- media_norm_feats += [face_norm_feats[ind_m]]
233
- else: # image features from the same video will be aggregated into one feature
234
- media_norm_feats += [
235
- np.mean(face_norm_feats[ind_m], axis=0, keepdims=True)
236
- ]
237
- media_norm_feats = np.array(media_norm_feats)
238
- # media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))
239
- template_feats[count_template] = np.sum(media_norm_feats, axis=0)
240
- if count_template % 2000 == 0:
241
- print('Finish Calculating {} template features.'.format(
242
- count_template))
243
- # template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True))
244
- template_norm_feats = sklearn.preprocessing.normalize(template_feats)
245
- # print(template_norm_feats.shape)
246
- return template_norm_feats, unique_templates
247
-
248
-
249
- # In[ ]:
250
-
251
-
252
- def verification(template_norm_feats=None,
253
- unique_templates=None,
254
- p1=None,
255
- p2=None):
256
- # ==========================================================
257
- # Compute set-to-set Similarity Score.
258
- # ==========================================================
259
- template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
260
- for count_template, uqt in enumerate(unique_templates):
261
- template2id[uqt] = count_template
262
-
263
- score = np.zeros((len(p1),)) # save cosine distance between pairs
264
-
265
- total_pairs = np.array(range(len(p1)))
266
- batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limitation
267
- sublists = [
268
- total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)
269
- ]
270
- total_sublists = len(sublists)
271
- for c, s in enumerate(sublists):
272
- feat1 = template_norm_feats[template2id[p1[s]]]
273
- feat2 = template_norm_feats[template2id[p2[s]]]
274
- similarity_score = np.sum(feat1 * feat2, -1)
275
- score[s] = similarity_score.flatten()
276
- if c % 10 == 0:
277
- print('Finish {}/{} pairs.'.format(c, total_sublists))
278
- return score
279
-
280
-
281
- # In[ ]:
282
- def verification2(template_norm_feats=None,
283
- unique_templates=None,
284
- p1=None,
285
- p2=None):
286
- template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
287
- for count_template, uqt in enumerate(unique_templates):
288
- template2id[uqt] = count_template
289
- score = np.zeros((len(p1),)) # save cosine distance between pairs
290
- total_pairs = np.array(range(len(p1)))
291
- batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limitation
292
- sublists = [
293
- total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)
294
- ]
295
- total_sublists = len(sublists)
296
- for c, s in enumerate(sublists):
297
- feat1 = template_norm_feats[template2id[p1[s]]]
298
- feat2 = template_norm_feats[template2id[p2[s]]]
299
- similarity_score = np.sum(feat1 * feat2, -1)
300
- score[s] = similarity_score.flatten()
301
- if c % 10 == 0:
302
- print('Finish {}/{} pairs.'.format(c, total_sublists))
303
- return score
304
-
305
-
306
- def read_score(path):
307
- with open(path, 'rb') as fid:
308
- img_feats = pickle.load(fid)
309
- return img_feats
310
-
311
-
312
- # # Step1: Load Meta Data
313
-
314
- # In[ ]:
315
-
316
- assert target == 'IJBC' or target == 'IJBB'
317
-
318
- # =============================================================
319
- # load image and template relationships for template feature embedding
320
- # tid --> template id, mid --> media id
321
- # format:
322
- # image_name tid mid
323
- # =============================================================
324
- start = timeit.default_timer()
325
- templates, medias = read_template_media_list(
326
- os.path.join('%s/meta' % image_path,
327
- '%s_face_tid_mid.txt' % target.lower()))
328
- stop = timeit.default_timer()
329
- print('Time: %.2f s. ' % (stop - start))
330
-
331
- # In[ ]:
332
-
333
- # =============================================================
334
- # load template pairs for template-to-template verification
335
- # tid : template id, label : 1/0
336
- # format:
337
- # tid_1 tid_2 label
338
- # =============================================================
339
- start = timeit.default_timer()
340
- p1, p2, label = read_template_pair_list(
341
- os.path.join('%s/meta' % image_path,
342
- '%s_template_pair_label.txt' % target.lower()))
343
- stop = timeit.default_timer()
344
- print('Time: %.2f s. ' % (stop - start))
345
-
346
- # # Step 2: Get Image Features
347
-
348
- # In[ ]:
349
-
350
- # =============================================================
351
- # load image features
352
- # format:
353
- # img_feats: [image_num x feats_dim] (227630, 512)
354
- # =============================================================
355
- start = timeit.default_timer()
356
- img_path = '%s/loose_crop' % image_path
357
- img_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower())
358
- img_list = open(img_list_path)
359
- files = img_list.readlines()
360
- # files_list = divideIntoNstrand(files, rank_size)
361
- files_list = files
362
-
363
- # img_feats
364
- # for i in range(rank_size):
365
- img_feats, faceness_scores = get_image_feature(img_path, files_list,
366
- model_path, 0, gpu_id)
367
- stop = timeit.default_timer()
368
- print('Time: %.2f s. ' % (stop - start))
369
- print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],
370
- img_feats.shape[1]))
371
-
372
- # # Step3: Get Template Features
373
-
374
- # In[ ]:
375
-
376
- # =============================================================
377
- # compute template features from image features.
378
- # =============================================================
379
- start = timeit.default_timer()
380
- # ==========================================================
381
- # Norm feature before aggregation into template feature?
382
- # The feature norm from the embedding network and the faceness score can down-weight noisy (non-face) samples.
383
- # ==========================================================
384
- # 1. FaceScore (Feature Norm)
385
- # 2. FaceScore (Detector)
386
-
387
- if use_flip_test:
388
- # concat --- F1
389
- # img_input_feats = img_feats
390
- # add --- F2
391
- img_input_feats = img_feats[:, 0:img_feats.shape[1] //
392
- 2] + img_feats[:, img_feats.shape[1] // 2:]
393
- else:
394
- img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2]
395
-
396
- if use_norm_score:
397
- img_input_feats = img_input_feats
398
- else:
399
- # normalise features to remove norm information
400
- img_input_feats = img_input_feats / np.sqrt(
401
- np.sum(img_input_feats ** 2, -1, keepdims=True))
402
-
403
- if use_detector_score:
404
- print(img_input_feats.shape, faceness_scores.shape)
405
- img_input_feats = img_input_feats * faceness_scores[:, np.newaxis]
406
- else:
407
- img_input_feats = img_input_feats
408
-
409
- template_norm_feats, unique_templates = image2template_feature(
410
- img_input_feats, templates, medias)
411
- stop = timeit.default_timer()
412
- print('Time: %.2f s. ' % (stop - start))
413
-
414
- # # Step 4: Get Template Similarity Scores
415
-
416
- # In[ ]:
417
-
418
- # =============================================================
419
- # compute verification scores between template pairs.
420
- # =============================================================
421
- start = timeit.default_timer()
422
- score = verification(template_norm_feats, unique_templates, p1, p2)
423
- stop = timeit.default_timer()
424
- print('Time: %.2f s. ' % (stop - start))
425
-
426
- # In[ ]:
427
- save_path = os.path.join(result_dir, args.job)
428
- # save_path = result_dir + '/%s_result' % target
429
-
430
- if not os.path.exists(save_path):
431
- os.makedirs(save_path)
432
-
433
- score_save_file = os.path.join(save_path, "%s.npy" % target.lower())
434
- np.save(score_save_file, score)
435
-
436
- # # Step 5: Get ROC Curves and TPR@FPR Table
437
-
438
- # In[ ]:
439
-
440
- files = [score_save_file]
441
- methods = []
442
- scores = []
443
- for file in files:
444
- methods.append(Path(file).stem)
445
- scores.append(np.load(file))
446
-
447
- methods = np.array(methods)
448
- scores = dict(zip(methods, scores))
449
- colours = dict(
450
- zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))
451
- x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]
452
- tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])
453
- fig = plt.figure()
454
- for method in methods:
455
- fpr, tpr, _ = roc_curve(label, scores[method])
456
- roc_auc = auc(fpr, tpr)
457
- fpr = np.flipud(fpr)
458
- tpr = np.flipud(tpr) # select largest tpr at same fpr
459
- plt.plot(fpr,
460
- tpr,
461
- color=colours[method],
462
- lw=1,
463
- label=('[%s (AUC = %0.4f %%)]' %
464
- (method.split('-')[-1], roc_auc * 100)))
465
- tpr_fpr_row = []
466
- tpr_fpr_row.append("%s-%s" % (method, target))
467
- for fpr_iter in np.arange(len(x_labels)):
468
- _, min_index = min(
469
- list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr)))))
470
- tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100))
471
- tpr_fpr_table.add_row(tpr_fpr_row)
472
- plt.xlim([10 ** -6, 0.1])
473
- plt.ylim([0.3, 1.0])
474
- plt.grid(linestyle='--', linewidth=1)
475
- plt.xticks(x_labels)
476
- plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
477
- plt.xscale('log')
478
- plt.xlabel('False Positive Rate')
479
- plt.ylabel('True Positive Rate')
480
- plt.title('ROC on IJB')
481
- plt.legend(loc="lower right")
482
- fig.savefig(os.path.join(save_path, '%s.pdf' % target.lower()))
483
- print(tpr_fpr_table)
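
For reference, the core scoring step of the deleted eval_ijbc.py above reduces to a dot product between L2-normalized template features. A self-contained sketch of that computation, with random vectors standing in for real embeddings (the shapes and names mirror the script; no IJB data or model weights are involved):

import numpy as np
from sklearn.preprocessing import normalize

# Toy template features: 4 templates with 512-dim embeddings.
rng = np.random.default_rng(0)
template_feats = rng.normal(size=(4, 512))

# L2-normalize, as image2template_feature() does via sklearn.preprocessing.normalize.
template_norm_feats = normalize(template_feats)

# Template index pairs to verify, as in verification().
p1 = np.array([0, 1])
p2 = np.array([2, 3])

# With unit-norm features, cosine similarity is an elementwise product followed by a sum.
scores = np.sum(template_norm_feats[p1] * template_norm_feats[p2], axis=-1)
print(scores)  # two similarity values in [-1, 1]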
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_if.py DELETED
@@ -1,1257 +0,0 @@
1
- import argparse
2
- import inspect
3
- import os
4
-
5
- import numpy as np
6
- import torch
7
- from torch.nn import functional as F
8
- from transformers import CLIPConfig, CLIPImageProcessor, CLIPVisionModelWithProjection, T5EncoderModel, T5Tokenizer
9
-
10
- from diffusers import DDPMScheduler, IFPipeline, IFSuperResolutionPipeline, UNet2DConditionModel
11
- from diffusers.pipelines.deepfloyd_if.safety_checker import IFSafetyChecker
12
-
13
-
14
- try:
15
- from omegaconf import OmegaConf
16
- except ImportError:
17
- raise ImportError(
18
- "OmegaConf is required to convert the IF checkpoints. Please install it with `pip install" " OmegaConf`."
19
- )
20
-
21
-
22
- def parse_args():
23
- parser = argparse.ArgumentParser()
24
-
25
- parser.add_argument("--dump_path", required=False, default=None, type=str)
26
-
27
- parser.add_argument("--dump_path_stage_2", required=False, default=None, type=str)
28
-
29
- parser.add_argument("--dump_path_stage_3", required=False, default=None, type=str)
30
-
31
- parser.add_argument("--unet_config", required=False, default=None, type=str, help="Path to unet config file")
32
-
33
- parser.add_argument(
34
- "--unet_checkpoint_path", required=False, default=None, type=str, help="Path to unet checkpoint file"
35
- )
36
-
37
- parser.add_argument(
38
- "--unet_checkpoint_path_stage_2",
39
- required=False,
40
- default=None,
41
- type=str,
42
- help="Path to stage 2 unet checkpoint file",
43
- )
44
-
45
- parser.add_argument(
46
- "--unet_checkpoint_path_stage_3",
47
- required=False,
48
- default=None,
49
- type=str,
50
- help="Path to stage 3 unet checkpoint file",
51
- )
52
-
53
- parser.add_argument("--p_head_path", type=str, required=True)
54
-
55
- parser.add_argument("--w_head_path", type=str, required=True)
56
-
57
- args = parser.parse_args()
58
-
59
- return args
60
-
61
-
62
- def main(args):
63
- tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-xxl")
64
- text_encoder = T5EncoderModel.from_pretrained("google/t5-v1_1-xxl")
65
-
66
- feature_extractor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
67
- safety_checker = convert_safety_checker(p_head_path=args.p_head_path, w_head_path=args.w_head_path)
68
-
69
- if args.unet_config is not None and args.unet_checkpoint_path is not None and args.dump_path is not None:
70
- convert_stage_1_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args)
71
-
72
- if args.unet_checkpoint_path_stage_2 is not None and args.dump_path_stage_2 is not None:
73
- convert_super_res_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args, stage=2)
74
-
75
- if args.unet_checkpoint_path_stage_3 is not None and args.dump_path_stage_3 is not None:
76
- convert_super_res_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args, stage=3)
77
-
78
-
79
- def convert_stage_1_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args):
80
- unet = get_stage_1_unet(args.unet_config, args.unet_checkpoint_path)
81
-
82
- scheduler = DDPMScheduler(
83
- variance_type="learned_range",
84
- beta_schedule="squaredcos_cap_v2",
85
- prediction_type="epsilon",
86
- thresholding=True,
87
- dynamic_thresholding_ratio=0.95,
88
- sample_max_value=1.5,
89
- )
90
-
91
- pipe = IFPipeline(
92
- tokenizer=tokenizer,
93
- text_encoder=text_encoder,
94
- unet=unet,
95
- scheduler=scheduler,
96
- safety_checker=safety_checker,
97
- feature_extractor=feature_extractor,
98
- requires_safety_checker=True,
99
- )
100
-
101
- pipe.save_pretrained(args.dump_path)
102
-
103
-
104
- def convert_super_res_pipeline(tokenizer, text_encoder, feature_extractor, safety_checker, args, stage):
105
- if stage == 2:
106
- unet_checkpoint_path = args.unet_checkpoint_path_stage_2
107
- sample_size = None
108
- dump_path = args.dump_path_stage_2
109
- elif stage == 3:
110
- unet_checkpoint_path = args.unet_checkpoint_path_stage_3
111
- sample_size = 1024
112
- dump_path = args.dump_path_stage_3
113
- else:
114
- assert False
115
-
116
- unet = get_super_res_unet(unet_checkpoint_path, verify_param_count=False, sample_size=sample_size)
117
-
118
- image_noising_scheduler = DDPMScheduler(
119
- beta_schedule="squaredcos_cap_v2",
120
- )
121
-
122
- scheduler = DDPMScheduler(
123
- variance_type="learned_range",
124
- beta_schedule="squaredcos_cap_v2",
125
- prediction_type="epsilon",
126
- thresholding=True,
127
- dynamic_thresholding_ratio=0.95,
128
- sample_max_value=1.0,
129
- )
130
-
131
- pipe = IFSuperResolutionPipeline(
132
- tokenizer=tokenizer,
133
- text_encoder=text_encoder,
134
- unet=unet,
135
- scheduler=scheduler,
136
- image_noising_scheduler=image_noising_scheduler,
137
- safety_checker=safety_checker,
138
- feature_extractor=feature_extractor,
139
- requires_safety_checker=True,
140
- )
141
-
142
- pipe.save_pretrained(dump_path)
143
-
144
-
145
- def get_stage_1_unet(unet_config, unet_checkpoint_path):
146
- original_unet_config = OmegaConf.load(unet_config)
147
- original_unet_config = original_unet_config.params
148
-
149
- unet_diffusers_config = create_unet_diffusers_config(original_unet_config)
150
-
151
- unet = UNet2DConditionModel(**unet_diffusers_config)
152
-
153
- device = "cuda" if torch.cuda.is_available() else "cpu"
154
- unet_checkpoint = torch.load(unet_checkpoint_path, map_location=device)
155
-
156
- converted_unet_checkpoint = convert_ldm_unet_checkpoint(
157
- unet_checkpoint, unet_diffusers_config, path=unet_checkpoint_path
158
- )
159
-
160
- unet.load_state_dict(converted_unet_checkpoint)
161
-
162
- return unet
163
-
164
-
165
- def convert_safety_checker(p_head_path, w_head_path):
166
- state_dict = {}
167
-
168
- # p head
169
-
170
- p_head = np.load(p_head_path)
171
-
172
- p_head_weights = p_head["weights"]
173
- p_head_weights = torch.from_numpy(p_head_weights)
174
- p_head_weights = p_head_weights.unsqueeze(0)
175
-
176
- p_head_biases = p_head["biases"]
177
- p_head_biases = torch.from_numpy(p_head_biases)
178
- p_head_biases = p_head_biases.unsqueeze(0)
179
-
180
- state_dict["p_head.weight"] = p_head_weights
181
- state_dict["p_head.bias"] = p_head_biases
182
-
183
- # w head
184
-
185
- w_head = np.load(w_head_path)
186
-
187
- w_head_weights = w_head["weights"]
188
- w_head_weights = torch.from_numpy(w_head_weights)
189
- w_head_weights = w_head_weights.unsqueeze(0)
190
-
191
- w_head_biases = w_head["biases"]
192
- w_head_biases = torch.from_numpy(w_head_biases)
193
- w_head_biases = w_head_biases.unsqueeze(0)
194
-
195
- state_dict["w_head.weight"] = w_head_weights
196
- state_dict["w_head.bias"] = w_head_biases
197
-
198
- # vision model
199
-
200
- vision_model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
201
- vision_model_state_dict = vision_model.state_dict()
202
-
203
- for key, value in vision_model_state_dict.items():
204
- key = f"vision_model.{key}"
205
- state_dict[key] = value
206
-
207
- # full model
208
-
209
- config = CLIPConfig.from_pretrained("openai/clip-vit-large-patch14")
210
- safety_checker = IFSafetyChecker(config)
211
-
212
- safety_checker.load_state_dict(state_dict)
213
-
214
- return safety_checker
215
-
216
-
217
- def create_unet_diffusers_config(original_unet_config, class_embed_type=None):
218
- attention_resolutions = parse_list(original_unet_config.attention_resolutions)
219
- attention_resolutions = [original_unet_config.image_size // int(res) for res in attention_resolutions]
220
-
221
- channel_mult = parse_list(original_unet_config.channel_mult)
222
- block_out_channels = [original_unet_config.model_channels * mult for mult in channel_mult]
223
-
224
- down_block_types = []
225
- resolution = 1
226
-
227
- for i in range(len(block_out_channels)):
228
- if resolution in attention_resolutions:
229
- block_type = "SimpleCrossAttnDownBlock2D"
230
- elif original_unet_config.resblock_updown:
231
- block_type = "ResnetDownsampleBlock2D"
232
- else:
233
- block_type = "DownBlock2D"
234
-
235
- down_block_types.append(block_type)
236
-
237
- if i != len(block_out_channels) - 1:
238
- resolution *= 2
239
-
240
- up_block_types = []
241
- for i in range(len(block_out_channels)):
242
- if resolution in attention_resolutions:
243
- block_type = "SimpleCrossAttnUpBlock2D"
244
- elif original_unet_config.resblock_updown:
245
- block_type = "ResnetUpsampleBlock2D"
246
- else:
247
- block_type = "UpBlock2D"
248
- up_block_types.append(block_type)
249
- resolution //= 2
250
-
251
- head_dim = original_unet_config.num_head_channels
252
-
253
- use_linear_projection = (
254
- original_unet_config.use_linear_in_transformer
255
- if "use_linear_in_transformer" in original_unet_config
256
- else False
257
- )
258
- if use_linear_projection:
259
- # stable diffusion 2-base-512 and 2-768
260
- if head_dim is None:
261
- head_dim = [5, 10, 20, 20]
262
-
263
- projection_class_embeddings_input_dim = None
264
-
265
- if class_embed_type is None:
266
- if "num_classes" in original_unet_config:
267
- if original_unet_config.num_classes == "sequential":
268
- class_embed_type = "projection"
269
- assert "adm_in_channels" in original_unet_config
270
- projection_class_embeddings_input_dim = original_unet_config.adm_in_channels
271
- else:
272
- raise NotImplementedError(
273
- f"Unknown conditional unet num_classes config: {original_unet_config.num_classes}"
274
- )
275
-
276
- config = {
277
- "sample_size": original_unet_config.image_size,
278
- "in_channels": original_unet_config.in_channels,
279
- "down_block_types": tuple(down_block_types),
280
- "block_out_channels": tuple(block_out_channels),
281
- "layers_per_block": original_unet_config.num_res_blocks,
282
- "cross_attention_dim": original_unet_config.encoder_channels,
283
- "attention_head_dim": head_dim,
284
- "use_linear_projection": use_linear_projection,
285
- "class_embed_type": class_embed_type,
286
- "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
287
- "out_channels": original_unet_config.out_channels,
288
- "up_block_types": tuple(up_block_types),
289
- "upcast_attention": False, # TODO: guessing
290
- "cross_attention_norm": "group_norm",
291
- "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
292
- "addition_embed_type": "text",
293
- "act_fn": "gelu",
294
- }
295
-
296
- if original_unet_config.use_scale_shift_norm:
297
- config["resnet_time_scale_shift"] = "scale_shift"
298
-
299
- if "encoder_dim" in original_unet_config:
300
- config["encoder_hid_dim"] = original_unet_config.encoder_dim
301
-
302
- return config
303
-
304
-
305
- def convert_ldm_unet_checkpoint(unet_state_dict, config, path=None):
306
- """
307
- Takes a state dict and a config, and returns a converted checkpoint.
308
- """
309
- new_checkpoint = {}
310
-
311
- new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
312
- new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
313
- new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
314
- new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]
315
-
316
- if config["class_embed_type"] in [None, "identity"]:
317
- # No parameters to port
318
- ...
319
- elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection":
320
- new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
321
- new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
322
- new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
323
- new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]
324
- else:
325
- raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}")
326
-
327
- new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
328
- new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
329
-
330
- new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
331
- new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
332
- new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
333
- new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
334
-
335
- # Retrieves the keys for the input blocks only
336
- num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
337
- input_blocks = {
338
- layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}." in key]
339
- for layer_id in range(num_input_blocks)
340
- }
341
-
342
- # Retrieves the keys for the middle blocks only
343
- num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
344
- middle_blocks = {
345
- layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
346
- for layer_id in range(num_middle_blocks)
347
- }
348
-
349
- # Retrieves the keys for the output blocks only
350
- num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
351
- output_blocks = {
352
- layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}." in key]
353
- for layer_id in range(num_output_blocks)
354
- }
355
-
356
- for i in range(1, num_input_blocks):
357
- block_id = (i - 1) // (config["layers_per_block"] + 1)
358
- layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)
359
-
360
- resnets = [
361
- key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
362
- ]
363
- attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
364
-
365
- if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
366
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
367
- f"input_blocks.{i}.0.op.weight"
368
- )
369
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
370
- f"input_blocks.{i}.0.op.bias"
371
- )
372
-
373
- paths = renew_resnet_paths(resnets)
374
-
375
- # TODO need better check than i in [4, 8, 12, 16]
376
- block_type = config["down_block_types"][block_id]
377
- if (block_type == "ResnetDownsampleBlock2D" or block_type == "SimpleCrossAttnDownBlock2D") and i in [
378
- 4,
379
- 8,
380
- 12,
381
- 16,
382
- ]:
383
- meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.downsamplers.0"}
384
- else:
385
- meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
386
-
387
- assign_to_checkpoint(
388
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
389
- )
390
-
391
- if len(attentions):
392
- old_path = f"input_blocks.{i}.1"
393
- new_path = f"down_blocks.{block_id}.attentions.{layer_in_block_id}"
394
-
395
- assign_attention_to_checkpoint(
396
- new_checkpoint=new_checkpoint,
397
- unet_state_dict=unet_state_dict,
398
- old_path=old_path,
399
- new_path=new_path,
400
- config=config,
401
- )
402
-
403
- paths = renew_attention_paths(attentions)
404
- meta_path = {"old": old_path, "new": new_path}
405
- assign_to_checkpoint(
406
- paths,
407
- new_checkpoint,
408
- unet_state_dict,
409
- additional_replacements=[meta_path],
410
- config=config,
411
- )
412
-
413
- resnet_0 = middle_blocks[0]
414
- attentions = middle_blocks[1]
415
- resnet_1 = middle_blocks[2]
416
-
417
- resnet_0_paths = renew_resnet_paths(resnet_0)
418
- assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
419
-
420
- resnet_1_paths = renew_resnet_paths(resnet_1)
421
- assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
422
-
423
- old_path = "middle_block.1"
424
- new_path = "mid_block.attentions.0"
425
-
426
- assign_attention_to_checkpoint(
427
- new_checkpoint=new_checkpoint,
428
- unet_state_dict=unet_state_dict,
429
- old_path=old_path,
430
- new_path=new_path,
431
- config=config,
432
- )
433
-
434
- attentions_paths = renew_attention_paths(attentions)
435
- meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
436
- assign_to_checkpoint(
437
- attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
438
- )
439
-
440
- for i in range(num_output_blocks):
441
- block_id = i // (config["layers_per_block"] + 1)
442
- layer_in_block_id = i % (config["layers_per_block"] + 1)
443
- output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
444
- output_block_list = {}
445
-
446
- for layer in output_block_layers:
447
- layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
448
- if layer_id in output_block_list:
449
- output_block_list[layer_id].append(layer_name)
450
- else:
451
- output_block_list[layer_id] = [layer_name]
452
-
453
- # len(output_block_list) == 1 -> resnet
454
- # len(output_block_list) == 2 -> resnet, attention
455
- # len(output_block_list) == 3 -> resnet, attention, upscale resnet
456
-
457
- if len(output_block_list) > 1:
458
- resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
459
- attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
460
-
461
- paths = renew_resnet_paths(resnets)
462
-
463
- meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
464
-
465
- assign_to_checkpoint(
466
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
467
- )
468
-
469
- output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
470
- if ["conv.bias", "conv.weight"] in output_block_list.values():
471
- index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
472
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
473
- f"output_blocks.{i}.{index}.conv.weight"
474
- ]
475
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
476
- f"output_blocks.{i}.{index}.conv.bias"
477
- ]
478
-
479
- # Clear attentions as they have been attributed above.
480
- if len(attentions) == 2:
481
- attentions = []
482
-
483
- if len(attentions):
484
- old_path = f"output_blocks.{i}.1"
485
- new_path = f"up_blocks.{block_id}.attentions.{layer_in_block_id}"
486
-
487
- assign_attention_to_checkpoint(
488
- new_checkpoint=new_checkpoint,
489
- unet_state_dict=unet_state_dict,
490
- old_path=old_path,
491
- new_path=new_path,
492
- config=config,
493
- )
494
-
495
- paths = renew_attention_paths(attentions)
496
- meta_path = {
497
- "old": old_path,
498
- "new": new_path,
499
- }
500
- assign_to_checkpoint(
501
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
502
- )
503
-
504
- if len(output_block_list) == 3:
505
- resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.2" in key]
506
- paths = renew_resnet_paths(resnets)
507
- meta_path = {"old": f"output_blocks.{i}.2", "new": f"up_blocks.{block_id}.upsamplers.0"}
508
- assign_to_checkpoint(
509
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
510
- )
511
- else:
512
- resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
513
- for path in resnet_0_paths:
514
- old_path = ".".join(["output_blocks", str(i), path["old"]])
515
- new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
516
-
517
- new_checkpoint[new_path] = unet_state_dict[old_path]
518
-
519
- if "encoder_proj.weight" in unet_state_dict:
520
- new_checkpoint["encoder_hid_proj.weight"] = unet_state_dict.pop("encoder_proj.weight")
521
- new_checkpoint["encoder_hid_proj.bias"] = unet_state_dict.pop("encoder_proj.bias")
522
-
523
- if "encoder_pooling.0.weight" in unet_state_dict:
524
- new_checkpoint["add_embedding.norm1.weight"] = unet_state_dict.pop("encoder_pooling.0.weight")
525
- new_checkpoint["add_embedding.norm1.bias"] = unet_state_dict.pop("encoder_pooling.0.bias")
526
-
527
- new_checkpoint["add_embedding.pool.positional_embedding"] = unet_state_dict.pop(
528
- "encoder_pooling.1.positional_embedding"
529
- )
530
- new_checkpoint["add_embedding.pool.k_proj.weight"] = unet_state_dict.pop("encoder_pooling.1.k_proj.weight")
531
- new_checkpoint["add_embedding.pool.k_proj.bias"] = unet_state_dict.pop("encoder_pooling.1.k_proj.bias")
532
- new_checkpoint["add_embedding.pool.q_proj.weight"] = unet_state_dict.pop("encoder_pooling.1.q_proj.weight")
533
- new_checkpoint["add_embedding.pool.q_proj.bias"] = unet_state_dict.pop("encoder_pooling.1.q_proj.bias")
534
- new_checkpoint["add_embedding.pool.v_proj.weight"] = unet_state_dict.pop("encoder_pooling.1.v_proj.weight")
535
- new_checkpoint["add_embedding.pool.v_proj.bias"] = unet_state_dict.pop("encoder_pooling.1.v_proj.bias")
536
-
537
- new_checkpoint["add_embedding.proj.weight"] = unet_state_dict.pop("encoder_pooling.2.weight")
538
- new_checkpoint["add_embedding.proj.bias"] = unet_state_dict.pop("encoder_pooling.2.bias")
539
-
540
- new_checkpoint["add_embedding.norm2.weight"] = unet_state_dict.pop("encoder_pooling.3.weight")
541
- new_checkpoint["add_embedding.norm2.bias"] = unet_state_dict.pop("encoder_pooling.3.bias")
542
-
543
- return new_checkpoint
544
-
545
-
546
- def shave_segments(path, n_shave_prefix_segments=1):
547
- """
548
- Removes segments. Positive values shave the first segments, negative shave the last segments.
549
- """
550
- if n_shave_prefix_segments >= 0:
551
- return ".".join(path.split(".")[n_shave_prefix_segments:])
552
- else:
553
- return ".".join(path.split(".")[:n_shave_prefix_segments])
554
-
555
-
556
- def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
557
- """
558
- Updates paths inside resnets to the new naming scheme (local renaming)
559
- """
560
- mapping = []
561
- for old_item in old_list:
562
- new_item = old_item.replace("in_layers.0", "norm1")
563
- new_item = new_item.replace("in_layers.2", "conv1")
564
-
565
- new_item = new_item.replace("out_layers.0", "norm2")
566
- new_item = new_item.replace("out_layers.3", "conv2")
567
-
568
- new_item = new_item.replace("emb_layers.1", "time_emb_proj")
569
- new_item = new_item.replace("skip_connection", "conv_shortcut")
570
-
571
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
572
-
573
- mapping.append({"old": old_item, "new": new_item})
574
-
575
- return mapping
576
-
577
-
578
- def renew_attention_paths(old_list, n_shave_prefix_segments=0):
579
- """
580
- Updates paths inside attentions to the new naming scheme (local renaming)
581
- """
582
- mapping = []
583
- for old_item in old_list:
584
- new_item = old_item
585
-
586
- if "qkv" in new_item:
587
- continue
588
-
589
- if "encoder_kv" in new_item:
590
- continue
591
-
592
- new_item = new_item.replace("norm.weight", "group_norm.weight")
593
- new_item = new_item.replace("norm.bias", "group_norm.bias")
594
-
595
- new_item = new_item.replace("proj_out.weight", "to_out.0.weight")
596
- new_item = new_item.replace("proj_out.bias", "to_out.0.bias")
597
-
598
- new_item = new_item.replace("norm_encoder.weight", "norm_cross.weight")
599
- new_item = new_item.replace("norm_encoder.bias", "norm_cross.bias")
600
-
601
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
602
-
603
- mapping.append({"old": old_item, "new": new_item})
604
-
605
- return mapping
606
-
607
-
608
- def assign_attention_to_checkpoint(new_checkpoint, unet_state_dict, old_path, new_path, config):
609
- qkv_weight = unet_state_dict.pop(f"{old_path}.qkv.weight")
610
- qkv_weight = qkv_weight[:, :, 0]
611
-
612
- qkv_bias = unet_state_dict.pop(f"{old_path}.qkv.bias")
613
-
614
- is_cross_attn_only = "only_cross_attention" in config and config["only_cross_attention"]
615
-
616
- split = 1 if is_cross_attn_only else 3
617
-
618
- weights, bias = split_attentions(
619
- weight=qkv_weight,
620
- bias=qkv_bias,
621
- split=split,
622
- chunk_size=config["attention_head_dim"],
623
- )
624
-
625
- if is_cross_attn_only:
626
- query_weight, q_bias = weights, bias
627
- new_checkpoint[f"{new_path}.to_q.weight"] = query_weight[0]
628
- new_checkpoint[f"{new_path}.to_q.bias"] = q_bias[0]
629
- else:
630
- [query_weight, key_weight, value_weight], [q_bias, k_bias, v_bias] = weights, bias
631
- new_checkpoint[f"{new_path}.to_q.weight"] = query_weight
632
- new_checkpoint[f"{new_path}.to_q.bias"] = q_bias
633
- new_checkpoint[f"{new_path}.to_k.weight"] = key_weight
634
- new_checkpoint[f"{new_path}.to_k.bias"] = k_bias
635
- new_checkpoint[f"{new_path}.to_v.weight"] = value_weight
636
- new_checkpoint[f"{new_path}.to_v.bias"] = v_bias
637
-
638
- encoder_kv_weight = unet_state_dict.pop(f"{old_path}.encoder_kv.weight")
639
- encoder_kv_weight = encoder_kv_weight[:, :, 0]
640
-
641
- encoder_kv_bias = unet_state_dict.pop(f"{old_path}.encoder_kv.bias")
642
-
643
- [encoder_k_weight, encoder_v_weight], [encoder_k_bias, encoder_v_bias] = split_attentions(
644
- weight=encoder_kv_weight,
645
- bias=encoder_kv_bias,
646
- split=2,
647
- chunk_size=config["attention_head_dim"],
648
- )
649
-
650
- new_checkpoint[f"{new_path}.add_k_proj.weight"] = encoder_k_weight
651
- new_checkpoint[f"{new_path}.add_k_proj.bias"] = encoder_k_bias
652
- new_checkpoint[f"{new_path}.add_v_proj.weight"] = encoder_v_weight
653
- new_checkpoint[f"{new_path}.add_v_proj.bias"] = encoder_v_bias
654
-
655
-
656
- def assign_to_checkpoint(paths, checkpoint, old_checkpoint, additional_replacements=None, config=None):
657
- """
658
- This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits
659
- attention layers, and takes into account additional replacements that may arise.
660
-
661
- Assigns the weights to the new checkpoint.
662
- """
663
- assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
664
-
665
- for path in paths:
666
- new_path = path["new"]
667
-
668
- # Global renaming happens here
669
- new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
670
- new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
671
- new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
672
-
673
- if additional_replacements is not None:
674
- for replacement in additional_replacements:
675
- new_path = new_path.replace(replacement["old"], replacement["new"])
676
-
677
- # proj_attn.weight has to be converted from conv 1D to linear
678
- if "proj_attn.weight" in new_path or "to_out.0.weight" in new_path:
679
- checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
680
- else:
681
- checkpoint[new_path] = old_checkpoint[path["old"]]
682
-
683
-
684
- # TODO maybe document and/or can do more efficiently (build indices in for loop and extract once for each split?)
685
- def split_attentions(*, weight, bias, split, chunk_size):
686
- weights = [None] * split
687
- biases = [None] * split
688
-
689
- weights_biases_idx = 0
690
-
691
- for starting_row_index in range(0, weight.shape[0], chunk_size):
692
- row_indices = torch.arange(starting_row_index, starting_row_index + chunk_size)
693
-
694
- weight_rows = weight[row_indices, :]
695
- bias_rows = bias[row_indices]
696
-
697
- if weights[weights_biases_idx] is None:
698
- weights[weights_biases_idx] = weight_rows
699
- biases[weights_biases_idx] = bias_rows
700
- else:
701
- assert weights[weights_biases_idx] is not None
702
- weights[weights_biases_idx] = torch.concat([weights[weights_biases_idx], weight_rows])
703
- biases[weights_biases_idx] = torch.concat([biases[weights_biases_idx], bias_rows])
704
-
705
- weights_biases_idx = (weights_biases_idx + 1) % split
706
-
707
- return weights, biases
708
-
709
-
710
- def parse_list(value):
711
- if isinstance(value, str):
712
- value = value.split(",")
713
- value = [int(v) for v in value]
714
- elif isinstance(value, list):
715
- pass
716
- else:
717
- raise ValueError(f"Can't parse list for type: {type(value)}")
718
-
719
- return value
720
-
721
-
722
- # below is copied and pasted from the original convert_if_stage_2.py script
723
-
724
-
725
- def get_super_res_unet(unet_checkpoint_path, verify_param_count=True, sample_size=None):
726
- orig_path = unet_checkpoint_path
727
-
728
- original_unet_config = OmegaConf.load(os.path.join(orig_path, "config.yml"))
729
- original_unet_config = original_unet_config.params
730
-
731
- unet_diffusers_config = superres_create_unet_diffusers_config(original_unet_config)
732
- unet_diffusers_config["time_embedding_dim"] = original_unet_config.model_channels * int(
733
- original_unet_config.channel_mult.split(",")[-1]
734
- )
735
- if original_unet_config.encoder_dim != original_unet_config.encoder_channels:
736
- unet_diffusers_config["encoder_hid_dim"] = original_unet_config.encoder_dim
737
- unet_diffusers_config["class_embed_type"] = "timestep"
738
- unet_diffusers_config["addition_embed_type"] = "text"
739
-
740
- unet_diffusers_config["time_embedding_act_fn"] = "gelu"
741
- unet_diffusers_config["resnet_skip_time_act"] = True
742
- unet_diffusers_config["resnet_out_scale_factor"] = 1 / 0.7071
743
- unet_diffusers_config["mid_block_scale_factor"] = 1 / 0.7071
744
- unet_diffusers_config["only_cross_attention"] = (
745
- bool(original_unet_config.disable_self_attentions)
746
- if (
747
- "disable_self_attentions" in original_unet_config
748
- and isinstance(original_unet_config.disable_self_attentions, int)
749
- )
750
- else True
751
- )
752
-
753
- if sample_size is None:
754
- unet_diffusers_config["sample_size"] = original_unet_config.image_size
755
- else:
756
- # The second upscaler unet's sample size is incorrectly specified
757
- # in the config and is instead hardcoded in source
758
- unet_diffusers_config["sample_size"] = sample_size
759
-
760
- unet_checkpoint = torch.load(os.path.join(unet_checkpoint_path, "pytorch_model.bin"), map_location="cpu")
761
-
762
- if verify_param_count:
763
- # check that architecture matches - is a bit slow
764
- verify_param_count(orig_path, unet_diffusers_config)
765
-
766
- converted_unet_checkpoint = superres_convert_ldm_unet_checkpoint(
767
- unet_checkpoint, unet_diffusers_config, path=unet_checkpoint_path
768
- )
769
- converted_keys = converted_unet_checkpoint.keys()
770
-
771
- model = UNet2DConditionModel(**unet_diffusers_config)
772
- expected_weights = model.state_dict().keys()
773
-
774
- diff_c_e = set(converted_keys) - set(expected_weights)
775
- diff_e_c = set(expected_weights) - set(converted_keys)
776
-
777
- assert len(diff_e_c) == 0, f"Expected, but not converted: {diff_e_c}"
778
- assert len(diff_c_e) == 0, f"Converted, but not expected: {diff_c_e}"
779
-
780
- model.load_state_dict(converted_unet_checkpoint)
781
-
782
- return model
783
-
784
-
785
- def superres_create_unet_diffusers_config(original_unet_config):
786
- attention_resolutions = parse_list(original_unet_config.attention_resolutions)
787
- attention_resolutions = [original_unet_config.image_size // int(res) for res in attention_resolutions]
788
-
789
- channel_mult = parse_list(original_unet_config.channel_mult)
790
- block_out_channels = [original_unet_config.model_channels * mult for mult in channel_mult]
791
-
792
- down_block_types = []
793
- resolution = 1
794
-
795
- for i in range(len(block_out_channels)):
796
- if resolution in attention_resolutions:
797
- block_type = "SimpleCrossAttnDownBlock2D"
798
- elif original_unet_config.resblock_updown:
799
- block_type = "ResnetDownsampleBlock2D"
800
- else:
801
- block_type = "DownBlock2D"
802
-
803
- down_block_types.append(block_type)
804
-
805
- if i != len(block_out_channels) - 1:
806
- resolution *= 2
807
-
808
- up_block_types = []
809
- for i in range(len(block_out_channels)):
810
- if resolution in attention_resolutions:
811
- block_type = "SimpleCrossAttnUpBlock2D"
812
- elif original_unet_config.resblock_updown:
813
- block_type = "ResnetUpsampleBlock2D"
814
- else:
815
- block_type = "UpBlock2D"
816
- up_block_types.append(block_type)
817
- resolution //= 2
818
-
819
- head_dim = original_unet_config.num_head_channels
820
- use_linear_projection = (
821
- original_unet_config.use_linear_in_transformer
822
- if "use_linear_in_transformer" in original_unet_config
823
- else False
824
- )
825
- if use_linear_projection:
826
- # stable diffusion 2-base-512 and 2-768
827
- if head_dim is None:
828
- head_dim = [5, 10, 20, 20]
829
-
830
- class_embed_type = None
831
- projection_class_embeddings_input_dim = None
832
-
833
- if "num_classes" in original_unet_config:
834
- if original_unet_config.num_classes == "sequential":
835
- class_embed_type = "projection"
836
- assert "adm_in_channels" in original_unet_config
837
- projection_class_embeddings_input_dim = original_unet_config.adm_in_channels
838
- else:
839
- raise NotImplementedError(
840
- f"Unknown conditional unet num_classes config: {original_unet_config.num_classes}"
841
- )
842
-
843
- config = {
844
- "in_channels": original_unet_config.in_channels,
845
- "down_block_types": tuple(down_block_types),
846
- "block_out_channels": tuple(block_out_channels),
847
- "layers_per_block": tuple(original_unet_config.num_res_blocks),
848
- "cross_attention_dim": original_unet_config.encoder_channels,
849
- "attention_head_dim": head_dim,
850
- "use_linear_projection": use_linear_projection,
851
- "class_embed_type": class_embed_type,
852
- "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
853
- "out_channels": original_unet_config.out_channels,
854
- "up_block_types": tuple(up_block_types),
855
- "upcast_attention": False, # TODO: guessing
856
- "cross_attention_norm": "group_norm",
857
- "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
858
- "act_fn": "gelu",
859
- }
860
-
861
- if original_unet_config.use_scale_shift_norm:
862
- config["resnet_time_scale_shift"] = "scale_shift"
863
-
864
- return config
865
-
866
-
867
- def superres_convert_ldm_unet_checkpoint(unet_state_dict, config, path=None, extract_ema=False):
868
- """
869
- Takes a state dict and a config, and returns a converted checkpoint.
870
- """
871
- new_checkpoint = {}
872
-
873
- new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
874
- new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
875
- new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
876
- new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]
877
-
878
- if config["class_embed_type"] is None:
879
- # No parameters to port
880
- ...
881
- elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection":
882
- new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["aug_proj.0.weight"]
883
- new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["aug_proj.0.bias"]
884
- new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["aug_proj.2.weight"]
885
- new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["aug_proj.2.bias"]
886
- else:
887
- raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}")
888
-
889
- if "encoder_proj.weight" in unet_state_dict:
890
- new_checkpoint["encoder_hid_proj.weight"] = unet_state_dict["encoder_proj.weight"]
891
- new_checkpoint["encoder_hid_proj.bias"] = unet_state_dict["encoder_proj.bias"]
892
-
893
- if "encoder_pooling.0.weight" in unet_state_dict:
894
- mapping = {
895
- "encoder_pooling.0": "add_embedding.norm1",
896
- "encoder_pooling.1": "add_embedding.pool",
897
- "encoder_pooling.2": "add_embedding.proj",
898
- "encoder_pooling.3": "add_embedding.norm2",
899
- }
900
- for key in unet_state_dict.keys():
901
- if key.startswith("encoder_pooling"):
902
- prefix = key[: len("encoder_pooling.0")]
903
- new_key = key.replace(prefix, mapping[prefix])
904
- new_checkpoint[new_key] = unet_state_dict[key]
905
-
906
- new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
907
- new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
908
-
909
- new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
910
- new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
911
- new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
912
- new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
913
-
914
- # Retrieves the keys for the input blocks only
915
- num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
916
- input_blocks = {
917
- layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}." in key]
918
- for layer_id in range(num_input_blocks)
919
- }
920
-
921
- # Retrieves the keys for the middle blocks only
922
- num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
923
- middle_blocks = {
924
- layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
925
- for layer_id in range(num_middle_blocks)
926
- }
927
-
928
- # Retrieves the keys for the output blocks only
929
- num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
930
- output_blocks = {
931
- layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}." in key]
932
- for layer_id in range(num_output_blocks)
933
- }
934
- if not isinstance(config["layers_per_block"], int):
935
- layers_per_block_list = [e + 1 for e in config["layers_per_block"]]
936
- layers_per_block_cumsum = list(np.cumsum(layers_per_block_list))
937
- downsampler_ids = layers_per_block_cumsum
938
- else:
939
- # TODO need better check than i in [4, 8, 12, 16]
940
- downsampler_ids = [4, 8, 12, 16]
941
-
942
- for i in range(1, num_input_blocks):
943
- if isinstance(config["layers_per_block"], int):
944
- layers_per_block = config["layers_per_block"]
945
- block_id = (i - 1) // (layers_per_block + 1)
946
- layer_in_block_id = (i - 1) % (layers_per_block + 1)
947
- else:
948
- block_id = next(k for k, n in enumerate(layers_per_block_cumsum) if (i - 1) < n)
949
- passed_blocks = layers_per_block_cumsum[block_id - 1] if block_id > 0 else 0
950
- layer_in_block_id = (i - 1) - passed_blocks
951
-
952
- resnets = [
953
- key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
954
- ]
955
- attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
956
-
957
- if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
958
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
959
- f"input_blocks.{i}.0.op.weight"
960
- )
961
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
962
- f"input_blocks.{i}.0.op.bias"
963
- )
964
-
965
- paths = renew_resnet_paths(resnets)
966
-
967
- block_type = config["down_block_types"][block_id]
968
- if (
969
- block_type == "ResnetDownsampleBlock2D" or block_type == "SimpleCrossAttnDownBlock2D"
970
- ) and i in downsampler_ids:
971
- meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.downsamplers.0"}
972
- else:
973
- meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
974
-
975
- assign_to_checkpoint(
976
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
977
- )
978
-
979
- if len(attentions):
980
- old_path = f"input_blocks.{i}.1"
981
- new_path = f"down_blocks.{block_id}.attentions.{layer_in_block_id}"
982
-
983
- assign_attention_to_checkpoint(
984
- new_checkpoint=new_checkpoint,
985
- unet_state_dict=unet_state_dict,
986
- old_path=old_path,
987
- new_path=new_path,
988
- config=config,
989
- )
990
-
991
- paths = renew_attention_paths(attentions)
992
- meta_path = {"old": old_path, "new": new_path}
993
- assign_to_checkpoint(
994
- paths,
995
- new_checkpoint,
996
- unet_state_dict,
997
- additional_replacements=[meta_path],
998
- config=config,
999
- )
1000
-
1001
- resnet_0 = middle_blocks[0]
1002
- attentions = middle_blocks[1]
1003
- resnet_1 = middle_blocks[2]
1004
-
1005
- resnet_0_paths = renew_resnet_paths(resnet_0)
1006
- assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
1007
-
1008
- resnet_1_paths = renew_resnet_paths(resnet_1)
1009
- assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
1010
-
1011
- old_path = "middle_block.1"
1012
- new_path = "mid_block.attentions.0"
1013
-
1014
- assign_attention_to_checkpoint(
1015
- new_checkpoint=new_checkpoint,
1016
- unet_state_dict=unet_state_dict,
1017
- old_path=old_path,
1018
- new_path=new_path,
1019
- config=config,
1020
- )
1021
-
1022
- attentions_paths = renew_attention_paths(attentions)
1023
- meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
1024
- assign_to_checkpoint(
1025
- attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
1026
- )
1027
- if not isinstance(config["layers_per_block"], int):
1028
- layers_per_block_list = list(reversed([e + 1 for e in config["layers_per_block"]]))
1029
- layers_per_block_cumsum = list(np.cumsum(layers_per_block_list))
1030
-
1031
- for i in range(num_output_blocks):
1032
- if isinstance(config["layers_per_block"], int):
1033
- layers_per_block = config["layers_per_block"]
1034
- block_id = i // (layers_per_block + 1)
1035
- layer_in_block_id = i % (layers_per_block + 1)
1036
- else:
1037
- block_id = next(k for k, n in enumerate(layers_per_block_cumsum) if i < n)
1038
- passed_blocks = layers_per_block_cumsum[block_id - 1] if block_id > 0 else 0
1039
- layer_in_block_id = i - passed_blocks
1040
-
1041
- output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
1042
- output_block_list = {}
1043
-
1044
- for layer in output_block_layers:
1045
- layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
1046
- if layer_id in output_block_list:
1047
- output_block_list[layer_id].append(layer_name)
1048
- else:
1049
- output_block_list[layer_id] = [layer_name]
1050
-
1051
- # len(output_block_list) == 1 -> resnet
1052
- # len(output_block_list) == 2 -> resnet, attention or resnet, upscale resnet
1053
- # len(output_block_list) == 3 -> resnet, attention, upscale resnet
1054
-
1055
- if len(output_block_list) > 1:
1056
- resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
1057
-
1058
- has_attention = True
1059
- if len(output_block_list) == 2 and any("in_layers" in k for k in output_block_list["1"]):
1060
- has_attention = False
1061
-
1062
- maybe_attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
1063
-
1064
- paths = renew_resnet_paths(resnets)
1065
-
1066
- meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
1067
-
1068
- assign_to_checkpoint(
1069
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
1070
- )
1071
-
1072
- output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
1073
- if ["conv.bias", "conv.weight"] in output_block_list.values():
1074
- index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
1075
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
1076
- f"output_blocks.{i}.{index}.conv.weight"
1077
- ]
1078
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
1079
- f"output_blocks.{i}.{index}.conv.bias"
1080
- ]
1081
-
1082
- # this layer was no attention
1083
- has_attention = False
1084
- maybe_attentions = []
1085
-
1086
- if has_attention:
1087
- old_path = f"output_blocks.{i}.1"
1088
- new_path = f"up_blocks.{block_id}.attentions.{layer_in_block_id}"
1089
-
1090
- assign_attention_to_checkpoint(
1091
- new_checkpoint=new_checkpoint,
1092
- unet_state_dict=unet_state_dict,
1093
- old_path=old_path,
1094
- new_path=new_path,
1095
- config=config,
1096
- )
1097
-
1098
- paths = renew_attention_paths(maybe_attentions)
1099
- meta_path = {
1100
- "old": old_path,
1101
- "new": new_path,
1102
- }
1103
- assign_to_checkpoint(
1104
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
1105
- )
1106
-
1107
- if len(output_block_list) == 3 or (not has_attention and len(maybe_attentions) > 0):
1108
- layer_id = len(output_block_list) - 1
1109
- resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.{layer_id}" in key]
1110
- paths = renew_resnet_paths(resnets)
1111
- meta_path = {"old": f"output_blocks.{i}.{layer_id}", "new": f"up_blocks.{block_id}.upsamplers.0"}
1112
- assign_to_checkpoint(
1113
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
1114
- )
1115
- else:
1116
- resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
1117
- for path in resnet_0_paths:
1118
- old_path = ".".join(["output_blocks", str(i), path["old"]])
1119
- new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
1120
-
1121
- new_checkpoint[new_path] = unet_state_dict[old_path]
1122
-
1123
- return new_checkpoint
1124
-
1125
-
1126
- def verify_param_count(orig_path, unet_diffusers_config):
1127
- if "-II-" in orig_path:
1128
- from deepfloyd_if.modules import IFStageII
1129
-
1130
- if_II = IFStageII(device="cpu", dir_or_name=orig_path)
1131
- elif "-III-" in orig_path:
1132
- from deepfloyd_if.modules import IFStageIII
1133
-
1134
- if_II = IFStageIII(device="cpu", dir_or_name=orig_path)
1135
- else:
1136
- assert f"Weird name. Should have -II- or -III- in path: {orig_path}"
1137
-
1138
- unet = UNet2DConditionModel(**unet_diffusers_config)
1139
-
1140
- # in params
1141
- assert_param_count(unet.time_embedding, if_II.model.time_embed)
1142
- assert_param_count(unet.conv_in, if_II.model.input_blocks[:1])
1143
-
1144
- # downblocks
1145
- assert_param_count(unet.down_blocks[0], if_II.model.input_blocks[1:4])
1146
- assert_param_count(unet.down_blocks[1], if_II.model.input_blocks[4:7])
1147
- assert_param_count(unet.down_blocks[2], if_II.model.input_blocks[7:11])
1148
-
1149
- if "-II-" in orig_path:
1150
- assert_param_count(unet.down_blocks[3], if_II.model.input_blocks[11:17])
1151
- assert_param_count(unet.down_blocks[4], if_II.model.input_blocks[17:])
1152
- if "-III-" in orig_path:
1153
- assert_param_count(unet.down_blocks[3], if_II.model.input_blocks[11:15])
1154
- assert_param_count(unet.down_blocks[4], if_II.model.input_blocks[15:20])
1155
- assert_param_count(unet.down_blocks[5], if_II.model.input_blocks[20:])
1156
-
1157
- # mid block
1158
- assert_param_count(unet.mid_block, if_II.model.middle_block)
1159
-
1160
- # up block
1161
- if "-II-" in orig_path:
1162
- assert_param_count(unet.up_blocks[0], if_II.model.output_blocks[:6])
1163
- assert_param_count(unet.up_blocks[1], if_II.model.output_blocks[6:12])
1164
- assert_param_count(unet.up_blocks[2], if_II.model.output_blocks[12:16])
1165
- assert_param_count(unet.up_blocks[3], if_II.model.output_blocks[16:19])
1166
- assert_param_count(unet.up_blocks[4], if_II.model.output_blocks[19:])
1167
- if "-III-" in orig_path:
1168
- assert_param_count(unet.up_blocks[0], if_II.model.output_blocks[:5])
1169
- assert_param_count(unet.up_blocks[1], if_II.model.output_blocks[5:10])
1170
- assert_param_count(unet.up_blocks[2], if_II.model.output_blocks[10:14])
1171
- assert_param_count(unet.up_blocks[3], if_II.model.output_blocks[14:18])
1172
- assert_param_count(unet.up_blocks[4], if_II.model.output_blocks[18:21])
1173
- assert_param_count(unet.up_blocks[5], if_II.model.output_blocks[21:24])
1174
-
1175
- # out params
1176
- assert_param_count(unet.conv_norm_out, if_II.model.out[0])
1177
- assert_param_count(unet.conv_out, if_II.model.out[2])
1178
-
1179
- # make sure all model architecture has same param count
1180
- assert_param_count(unet, if_II.model)
1181
-
1182
-
1183
- def assert_param_count(model_1, model_2):
1184
- count_1 = sum(p.numel() for p in model_1.parameters())
1185
- count_2 = sum(p.numel() for p in model_2.parameters())
1186
- assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}"
1187
-
1188
-
1189
- def superres_check_against_original(dump_path, unet_checkpoint_path):
1190
- model_path = dump_path
1191
- model = UNet2DConditionModel.from_pretrained(model_path)
1192
- model.to("cuda")
1193
- orig_path = unet_checkpoint_path
1194
-
1195
- if "-II-" in orig_path:
1196
- from deepfloyd_if.modules import IFStageII
1197
-
1198
- if_II_model = IFStageII(device="cuda", dir_or_name=orig_path, model_kwargs={"precision": "fp32"}).model
1199
- elif "-III-" in orig_path:
1200
- from deepfloyd_if.modules import IFStageIII
1201
-
1202
- if_II_model = IFStageIII(device="cuda", dir_or_name=orig_path, model_kwargs={"precision": "fp32"}).model
1203
-
1204
- batch_size = 1
1205
- channels = model.in_channels // 2
1206
- height = model.sample_size
1207
- width = model.sample_size
1208
- height = 1024
1209
- width = 1024
1210
-
1211
- torch.manual_seed(0)
1212
-
1213
- latents = torch.randn((batch_size, channels, height, width), device=model.device)
1214
- image_small = torch.randn((batch_size, channels, height // 4, width // 4), device=model.device)
1215
-
1216
- interpolate_antialias = {}
1217
- if "antialias" in inspect.signature(F.interpolate).parameters:
1218
- interpolate_antialias["antialias"] = True
1219
- image_upscaled = F.interpolate(
1220
- image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias
1221
- )
1222
-
1223
- latent_model_input = torch.cat([latents, image_upscaled], dim=1).to(model.dtype)
1224
- t = torch.tensor([5], device=model.device).to(model.dtype)
1225
-
1226
- seq_len = 64
1227
- encoder_hidden_states = torch.randn((batch_size, seq_len, model.config.encoder_hid_dim), device=model.device).to(
1228
- model.dtype
1229
- )
1230
-
1231
- fake_class_labels = torch.tensor([t], device=model.device).to(model.dtype)
1232
-
1233
- with torch.no_grad():
1234
- out = if_II_model(latent_model_input, t, aug_steps=fake_class_labels, text_emb=encoder_hidden_states)
1235
-
1236
- if_II_model.to("cpu")
1237
- del if_II_model
1238
- import gc
1239
-
1240
- torch.cuda.empty_cache()
1241
- gc.collect()
1242
- print(50 * "=")
1243
-
1244
- with torch.no_grad():
1245
- noise_pred = model(
1246
- sample=latent_model_input,
1247
- encoder_hidden_states=encoder_hidden_states,
1248
- class_labels=fake_class_labels,
1249
- timestep=t,
1250
- ).sample
1251
-
1252
- print("Out shape", noise_pred.shape)
1253
- print("Diff", (out - noise_pred).abs().sum())
1254
-
1255
-
1256
- if __name__ == "__main__":
1257
- main(parse_args())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py DELETED
@@ -1,486 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Callable, List, Optional, Union
17
-
18
- import numpy as np
19
- import torch
20
- from transformers import CLIPImageProcessor, CLIPTokenizer
21
-
22
- from ...configuration_utils import FrozenDict
23
- from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
24
- from ...utils import deprecate, logging
25
- from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel
26
- from ..pipeline_utils import DiffusionPipeline
27
- from . import StableDiffusionPipelineOutput
28
-
29
-
30
- logger = logging.get_logger(__name__)
31
-
32
-
33
- class OnnxStableDiffusionPipeline(DiffusionPipeline):
34
- vae_encoder: OnnxRuntimeModel
35
- vae_decoder: OnnxRuntimeModel
36
- text_encoder: OnnxRuntimeModel
37
- tokenizer: CLIPTokenizer
38
- unet: OnnxRuntimeModel
39
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]
40
- safety_checker: OnnxRuntimeModel
41
- feature_extractor: CLIPImageProcessor
42
-
43
- _optional_components = ["safety_checker", "feature_extractor"]
44
- _is_onnx = True
45
-
46
- def __init__(
47
- self,
48
- vae_encoder: OnnxRuntimeModel,
49
- vae_decoder: OnnxRuntimeModel,
50
- text_encoder: OnnxRuntimeModel,
51
- tokenizer: CLIPTokenizer,
52
- unet: OnnxRuntimeModel,
53
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
54
- safety_checker: OnnxRuntimeModel,
55
- feature_extractor: CLIPImageProcessor,
56
- requires_safety_checker: bool = True,
57
- ):
58
- super().__init__()
59
-
60
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
61
- deprecation_message = (
62
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
63
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
64
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
65
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
66
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
67
- " file"
68
- )
69
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
70
- new_config = dict(scheduler.config)
71
- new_config["steps_offset"] = 1
72
- scheduler._internal_dict = FrozenDict(new_config)
73
-
74
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
75
- deprecation_message = (
76
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
77
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
78
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
79
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
80
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
81
- )
82
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
83
- new_config = dict(scheduler.config)
84
- new_config["clip_sample"] = False
85
- scheduler._internal_dict = FrozenDict(new_config)
86
-
87
- if safety_checker is None and requires_safety_checker:
88
- logger.warning(
89
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
90
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
91
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
92
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
93
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
94
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
95
- )
96
-
97
- if safety_checker is not None and feature_extractor is None:
98
- raise ValueError(
99
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
100
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
101
- )
102
-
103
- self.register_modules(
104
- vae_encoder=vae_encoder,
105
- vae_decoder=vae_decoder,
106
- text_encoder=text_encoder,
107
- tokenizer=tokenizer,
108
- unet=unet,
109
- scheduler=scheduler,
110
- safety_checker=safety_checker,
111
- feature_extractor=feature_extractor,
112
- )
113
- self.register_to_config(requires_safety_checker=requires_safety_checker)
114
-
115
- def _encode_prompt(
116
- self,
117
- prompt: Union[str, List[str]],
118
- num_images_per_prompt: Optional[int],
119
- do_classifier_free_guidance: bool,
120
- negative_prompt: Optional[str],
121
- prompt_embeds: Optional[np.ndarray] = None,
122
- negative_prompt_embeds: Optional[np.ndarray] = None,
123
- ):
124
- r"""
125
- Encodes the prompt into text encoder hidden states.
126
-
127
- Args:
128
- prompt (`str` or `List[str]`):
129
- prompt to be encoded
130
- num_images_per_prompt (`int`):
131
- number of images that should be generated per prompt
132
- do_classifier_free_guidance (`bool`):
133
- whether to use classifier free guidance or not
134
- negative_prompt (`str` or `List[str]`):
135
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
136
- if `guidance_scale` is less than `1`).
137
- prompt_embeds (`np.ndarray`, *optional*):
138
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
139
- provided, text embeddings will be generated from `prompt` input argument.
140
- negative_prompt_embeds (`np.ndarray`, *optional*):
141
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
142
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
143
- argument.
144
- """
145
- if prompt is not None and isinstance(prompt, str):
146
- batch_size = 1
147
- elif prompt is not None and isinstance(prompt, list):
148
- batch_size = len(prompt)
149
- else:
150
- batch_size = prompt_embeds.shape[0]
151
-
152
- if prompt_embeds is None:
153
- # get prompt text embeddings
154
- text_inputs = self.tokenizer(
155
- prompt,
156
- padding="max_length",
157
- max_length=self.tokenizer.model_max_length,
158
- truncation=True,
159
- return_tensors="np",
160
- )
161
- text_input_ids = text_inputs.input_ids
162
- untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
163
-
164
- if not np.array_equal(text_input_ids, untruncated_ids):
165
- removed_text = self.tokenizer.batch_decode(
166
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
167
- )
168
- logger.warning(
169
- "The following part of your input was truncated because CLIP can only handle sequences up to"
170
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
171
- )
172
-
173
- prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
174
-
175
- prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
176
-
177
- # get unconditional embeddings for classifier free guidance
178
- if do_classifier_free_guidance and negative_prompt_embeds is None:
179
- uncond_tokens: List[str]
180
- if negative_prompt is None:
181
- uncond_tokens = [""] * batch_size
182
- elif type(prompt) is not type(negative_prompt):
183
- raise TypeError(
184
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
185
- f" {type(prompt)}."
186
- )
187
- elif isinstance(negative_prompt, str):
188
- uncond_tokens = [negative_prompt] * batch_size
189
- elif batch_size != len(negative_prompt):
190
- raise ValueError(
191
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
192
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
193
- " the batch size of `prompt`."
194
- )
195
- else:
196
- uncond_tokens = negative_prompt
197
-
198
- max_length = prompt_embeds.shape[1]
199
- uncond_input = self.tokenizer(
200
- uncond_tokens,
201
- padding="max_length",
202
- max_length=max_length,
203
- truncation=True,
204
- return_tensors="np",
205
- )
206
- negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
207
-
208
- if do_classifier_free_guidance:
209
- negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
210
-
211
- # For classifier free guidance, we need to do two forward passes.
212
- # Here we concatenate the unconditional and text embeddings into a single batch
213
- # to avoid doing two forward passes
214
- prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
215
-
216
- return prompt_embeds
217
-
218
- def check_inputs(
219
- self,
220
- prompt: Union[str, List[str]],
221
- height: Optional[int],
222
- width: Optional[int],
223
- callback_steps: int,
224
- negative_prompt: Optional[str] = None,
225
- prompt_embeds: Optional[np.ndarray] = None,
226
- negative_prompt_embeds: Optional[np.ndarray] = None,
227
- ):
228
- if height % 8 != 0 or width % 8 != 0:
229
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
230
-
231
- if (callback_steps is None) or (
232
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
233
- ):
234
- raise ValueError(
235
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
236
- f" {type(callback_steps)}."
237
- )
238
-
239
- if prompt is not None and prompt_embeds is not None:
240
- raise ValueError(
241
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
242
- " only forward one of the two."
243
- )
244
- elif prompt is None and prompt_embeds is None:
245
- raise ValueError(
246
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
247
- )
248
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
249
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
250
-
251
- if negative_prompt is not None and negative_prompt_embeds is not None:
252
- raise ValueError(
253
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
254
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
255
- )
256
-
257
- if prompt_embeds is not None and negative_prompt_embeds is not None:
258
- if prompt_embeds.shape != negative_prompt_embeds.shape:
259
- raise ValueError(
260
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
261
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
262
- f" {negative_prompt_embeds.shape}."
263
- )
264
-
265
- def __call__(
266
- self,
267
- prompt: Union[str, List[str]] = None,
268
- height: Optional[int] = 512,
269
- width: Optional[int] = 512,
270
- num_inference_steps: Optional[int] = 50,
271
- guidance_scale: Optional[float] = 7.5,
272
- negative_prompt: Optional[Union[str, List[str]]] = None,
273
- num_images_per_prompt: Optional[int] = 1,
274
- eta: Optional[float] = 0.0,
275
- generator: Optional[np.random.RandomState] = None,
276
- latents: Optional[np.ndarray] = None,
277
- prompt_embeds: Optional[np.ndarray] = None,
278
- negative_prompt_embeds: Optional[np.ndarray] = None,
279
- output_type: Optional[str] = "pil",
280
- return_dict: bool = True,
281
- callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
282
- callback_steps: int = 1,
283
- ):
284
- r"""
285
- Function invoked when calling the pipeline for generation.
286
-
287
- Args:
288
- prompt (`str` or `List[str]`, *optional*):
289
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
290
- instead.
291
- image (`PIL.Image.Image` or List[`PIL.Image.Image`] or `torch.FloatTensor`):
292
- `Image`, or tensor representing an image batch which will be upscaled. *
293
- num_inference_steps (`int`, *optional*, defaults to 50):
294
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
295
- expense of slower inference.
296
- guidance_scale (`float`, *optional*, defaults to 7.5):
297
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
298
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
299
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
300
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
301
- usually at the expense of lower image quality.
302
- negative_prompt (`str` or `List[str]`, *optional*):
303
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
304
- `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
305
- is less than `1`).
306
- num_images_per_prompt (`int`, *optional*, defaults to 1):
307
- The number of images to generate per prompt.
308
- eta (`float`, *optional*, defaults to 0.0):
309
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
310
- [`schedulers.DDIMScheduler`], will be ignored for others.
311
- generator (`np.random.RandomState`, *optional*):
312
- One or a list of [numpy generator(s)](TODO) to make generation deterministic.
313
- latents (`np.ndarray`, *optional*):
314
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
315
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
316
- tensor will ge generated by sampling using the supplied random `generator`.
317
- prompt_embeds (`np.ndarray`, *optional*):
318
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
319
- provided, text embeddings will be generated from `prompt` input argument.
320
- negative_prompt_embeds (`np.ndarray`, *optional*):
321
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
322
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
323
- argument.
324
- output_type (`str`, *optional*, defaults to `"pil"`):
325
- The output format of the generate image. Choose between
326
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
327
- return_dict (`bool`, *optional*, defaults to `True`):
328
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
329
- plain tuple.
330
- callback (`Callable`, *optional*):
331
- A function that will be called every `callback_steps` steps during inference. The function will be
332
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
333
- callback_steps (`int`, *optional*, defaults to 1):
334
- The frequency at which the `callback` function will be called. If not specified, the callback will be
335
- called at every step.
336
-
337
- Returns:
338
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
339
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
340
- When returning a tuple, the first element is a list with the generated images, and the second element is a
341
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
342
- (nsfw) content, according to the `safety_checker`.
343
- """
344
-
345
- # check inputs. Raise error if not correct
346
- self.check_inputs(
347
- prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
348
- )
349
-
350
- # define call parameters
351
- if prompt is not None and isinstance(prompt, str):
352
- batch_size = 1
353
- elif prompt is not None and isinstance(prompt, list):
354
- batch_size = len(prompt)
355
- else:
356
- batch_size = prompt_embeds.shape[0]
357
-
358
- if generator is None:
359
- generator = np.random
360
-
361
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
362
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
363
- # corresponds to doing no classifier free guidance.
364
- do_classifier_free_guidance = guidance_scale > 1.0
365
-
366
- prompt_embeds = self._encode_prompt(
367
- prompt,
368
- num_images_per_prompt,
369
- do_classifier_free_guidance,
370
- negative_prompt,
371
- prompt_embeds=prompt_embeds,
372
- negative_prompt_embeds=negative_prompt_embeds,
373
- )
374
-
375
- # get the initial random noise unless the user supplied it
376
- latents_dtype = prompt_embeds.dtype
377
- latents_shape = (batch_size * num_images_per_prompt, 4, height // 8, width // 8)
378
- if latents is None:
379
- latents = generator.randn(*latents_shape).astype(latents_dtype)
380
- elif latents.shape != latents_shape:
381
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
382
-
383
- # set timesteps
384
- self.scheduler.set_timesteps(num_inference_steps)
385
-
386
- latents = latents * np.float64(self.scheduler.init_noise_sigma)
387
-
388
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
389
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
390
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
391
- # and should be between [0, 1]
392
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
393
- extra_step_kwargs = {}
394
- if accepts_eta:
395
- extra_step_kwargs["eta"] = eta
396
-
397
- timestep_dtype = next(
398
- (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
399
- )
400
- timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
401
-
402
- for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
403
- # expand the latents if we are doing classifier free guidance
404
- latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
405
- latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
406
- latent_model_input = latent_model_input.cpu().numpy()
407
-
408
- # predict the noise residual
409
- timestep = np.array([t], dtype=timestep_dtype)
410
- noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)
411
- noise_pred = noise_pred[0]
412
-
413
- # perform guidance
414
- if do_classifier_free_guidance:
415
- noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
416
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
417
-
418
- # compute the previous noisy sample x_t -> x_t-1
419
- scheduler_output = self.scheduler.step(
420
- torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
421
- )
422
- latents = scheduler_output.prev_sample.numpy()
423
-
424
- # call the callback, if provided
425
- if callback is not None and i % callback_steps == 0:
426
- callback(i, t, latents)
427
-
428
- latents = 1 / 0.18215 * latents
429
- # image = self.vae_decoder(latent_sample=latents)[0]
430
- # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1
431
- image = np.concatenate(
432
- [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
433
- )
434
-
435
- image = np.clip(image / 2 + 0.5, 0, 1)
436
- image = image.transpose((0, 2, 3, 1))
437
-
438
- if self.safety_checker is not None:
439
- safety_checker_input = self.feature_extractor(
440
- self.numpy_to_pil(image), return_tensors="np"
441
- ).pixel_values.astype(image.dtype)
442
-
443
- images, has_nsfw_concept = [], []
444
- for i in range(image.shape[0]):
445
- image_i, has_nsfw_concept_i = self.safety_checker(
446
- clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
447
- )
448
- images.append(image_i)
449
- has_nsfw_concept.append(has_nsfw_concept_i[0])
450
- image = np.concatenate(images)
451
- else:
452
- has_nsfw_concept = None
453
-
454
- if output_type == "pil":
455
- image = self.numpy_to_pil(image)
456
-
457
- if not return_dict:
458
- return (image, has_nsfw_concept)
459
-
460
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
461
-
462
-
463
- class StableDiffusionOnnxPipeline(OnnxStableDiffusionPipeline):
464
- def __init__(
465
- self,
466
- vae_encoder: OnnxRuntimeModel,
467
- vae_decoder: OnnxRuntimeModel,
468
- text_encoder: OnnxRuntimeModel,
469
- tokenizer: CLIPTokenizer,
470
- unet: OnnxRuntimeModel,
471
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
472
- safety_checker: OnnxRuntimeModel,
473
- feature_extractor: CLIPImageProcessor,
474
- ):
475
- deprecation_message = "Please use `OnnxStableDiffusionPipeline` instead of `StableDiffusionOnnxPipeline`."
476
- deprecate("StableDiffusionOnnxPipeline", "1.0.0", deprecation_message)
477
- super().__init__(
478
- vae_encoder=vae_encoder,
479
- vae_decoder=vae_decoder,
480
- text_encoder=text_encoder,
481
- tokenizer=tokenizer,
482
- unet=unet,
483
- scheduler=scheduler,
484
- safety_checker=safety_checker,
485
- feature_extractor=feature_extractor,
486
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py DELETED
@@ -1,92 +0,0 @@
1
- _base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
2
- rpn_weight = 0.7
3
- model = dict(
4
- rpn_head=dict(
5
- _delete_=True,
6
- type='CascadeRPNHead',
7
- num_stages=2,
8
- stages=[
9
- dict(
10
- type='StageCascadeRPNHead',
11
- in_channels=256,
12
- feat_channels=256,
13
- anchor_generator=dict(
14
- type='AnchorGenerator',
15
- scales=[8],
16
- ratios=[1.0],
17
- strides=[4, 8, 16, 32, 64]),
18
- adapt_cfg=dict(type='dilation', dilation=3),
19
- bridged_feature=True,
20
- sampling=False,
21
- with_cls=False,
22
- reg_decoded_bbox=True,
23
- bbox_coder=dict(
24
- type='DeltaXYWHBBoxCoder',
25
- target_means=(.0, .0, .0, .0),
26
- target_stds=(0.1, 0.1, 0.5, 0.5)),
27
- loss_bbox=dict(
28
- type='IoULoss', linear=True,
29
- loss_weight=10.0 * rpn_weight)),
30
- dict(
31
- type='StageCascadeRPNHead',
32
- in_channels=256,
33
- feat_channels=256,
34
- adapt_cfg=dict(type='offset'),
35
- bridged_feature=False,
36
- sampling=True,
37
- with_cls=True,
38
- reg_decoded_bbox=True,
39
- bbox_coder=dict(
40
- type='DeltaXYWHBBoxCoder',
41
- target_means=(.0, .0, .0, .0),
42
- target_stds=(0.05, 0.05, 0.1, 0.1)),
43
- loss_cls=dict(
44
- type='CrossEntropyLoss',
45
- use_sigmoid=True,
46
- loss_weight=1.0 * rpn_weight),
47
- loss_bbox=dict(
48
- type='IoULoss', linear=True,
49
- loss_weight=10.0 * rpn_weight))
50
- ]),
51
- roi_head=dict(
52
- bbox_head=dict(
53
- bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
54
- loss_cls=dict(
55
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
56
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
57
- # model training and testing settings
58
- train_cfg=dict(
59
- rpn=[
60
- dict(
61
- assigner=dict(
62
- type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
63
- allowed_border=-1,
64
- pos_weight=-1,
65
- debug=False),
66
- dict(
67
- assigner=dict(
68
- type='MaxIoUAssigner',
69
- pos_iou_thr=0.7,
70
- neg_iou_thr=0.7,
71
- min_pos_iou=0.3,
72
- ignore_iof_thr=-1),
73
- sampler=dict(
74
- type='RandomSampler',
75
- num=256,
76
- pos_fraction=0.5,
77
- neg_pos_ub=-1,
78
- add_gt_as_proposals=False),
79
- allowed_border=-1,
80
- pos_weight=-1,
81
- debug=False)
82
- ],
83
- rpn_proposal=dict(max_per_img=300, nms=dict(iou_threshold=0.8)),
84
- rcnn=dict(
85
- assigner=dict(
86
- pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
87
- sampler=dict(type='RandomSampler', num=256))),
88
- test_cfg=dict(
89
- rpn=dict(max_per_img=300, nms=dict(iou_threshold=0.8)),
90
- rcnn=dict(score_thr=1e-3)))
91
- optimizer_config = dict(
92
- _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docker/Dockerfile DELETED
@@ -1,75 +0,0 @@
1
- FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as builder
2
-
3
- RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,rw apt-get update && \
4
- apt-get install --no-install-recommends -y git vim build-essential python3-dev python3-venv && \
5
- rm -rf /var/lib/apt/lists/*
6
-
7
- RUN git clone --depth=1 https://github.com/oobabooga/GPTQ-for-LLaMa /build
8
-
9
- WORKDIR /build
10
-
11
- RUN --mount=type=cache,target=/root/.cache/pip,rw \
12
- python3 -m venv /build/venv && \
13
- . /build/venv/bin/activate && \
14
- pip3 install --upgrade pip setuptools wheel && \
15
- pip3 install torch torchvision torchaudio && \
16
- pip3 install -r requirements.txt
17
-
18
- # https://developer.nvidia.com/cuda-gpus
19
- # for a rtx 2060: ARG TORCH_CUDA_ARCH_LIST="7.5"
20
- ARG TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX}"
21
- RUN . /build/venv/bin/activate && \
22
- python3 setup_cuda.py bdist_wheel -d .
23
-
24
- FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04
25
-
26
- LABEL maintainer="Your Name <[email protected]>"
27
- LABEL description="Docker image for GPTQ-for-LLaMa and Text Generation WebUI"
28
-
29
- RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,rw apt-get update && \
30
- apt-get install --no-install-recommends -y python3-dev libportaudio2 libasound-dev git python3 python3-pip make g++ ffmpeg && \
31
- rm -rf /var/lib/apt/lists/*
32
-
33
- RUN --mount=type=cache,target=/root/.cache/pip,rw pip3 install virtualenv
34
- RUN mkdir /app
35
-
36
- WORKDIR /app
37
-
38
- ARG WEBUI_VERSION
39
- RUN test -n "${WEBUI_VERSION}" && git reset --hard ${WEBUI_VERSION} || echo "Using provided webui source"
40
-
41
- # Create virtualenv
42
- RUN virtualenv /app/venv
43
- RUN --mount=type=cache,target=/root/.cache/pip,rw \
44
- . /app/venv/bin/activate && \
45
- pip3 install --upgrade pip setuptools wheel && \
46
- pip3 install torch torchvision torchaudio sentence_transformers xformers
47
-
48
- # Copy and install GPTQ-for-LLaMa
49
- COPY --from=builder /build /app/repositories/GPTQ-for-LLaMa
50
- RUN --mount=type=cache,target=/root/.cache/pip,rw \
51
- . /app/venv/bin/activate && \
52
- pip3 install /app/repositories/GPTQ-for-LLaMa/*.whl
53
-
54
- # Install main requirements
55
- COPY requirements.txt /app/requirements.txt
56
- RUN --mount=type=cache,target=/root/.cache/pip,rw \
57
- . /app/venv/bin/activate && \
58
- pip3 install -r requirements.txt
59
-
60
- COPY . /app/
61
-
62
- RUN cp /app/venv/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118.so /app/venv/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so
63
-
64
- # Install extension requirements
65
- RUN --mount=type=cache,target=/root/.cache/pip,rw \
66
- . /app/venv/bin/activate && \
67
- for ext in /app/extensions/*/requirements.txt; do \
68
- cd "$(dirname "$ext")"; \
69
- pip3 install -r requirements.txt; \
70
- done
71
-
72
- ENV CLI_ARGS=""
73
-
74
- EXPOSE ${CONTAINER_PORT:-7860} ${CONTAINER_API_PORT:-5000} ${CONTAINER_API_STREAM_PORT:-5005}
75
- CMD . /app/venv/bin/activate && python3 server.py ${CLI_ARGS}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/errors.py DELETED
@@ -1,31 +0,0 @@
1
- class OpenAIError(Exception):
2
- def __init__(self, message=None, code=500, internal_message=''):
3
- self.message = message
4
- self.code = code
5
- self.internal_message = internal_message
6
-
7
- def __repr__(self):
8
- return "%s(message=%r, code=%d)" % (
9
- self.__class__.__name__,
10
- self.message,
11
- self.code,
12
- )
13
-
14
-
15
- class InvalidRequestError(OpenAIError):
16
- def __init__(self, message, param, code=400, internal_message=''):
17
- super().__init__(message, code, internal_message)
18
- self.param = param
19
-
20
- def __repr__(self):
21
- return "%s(message=%r, code=%d, param=%s)" % (
22
- self.__class__.__name__,
23
- self.message,
24
- self.code,
25
- self.param,
26
- )
27
-
28
-
29
- class ServiceUnavailableError(OpenAIError):
30
- def __init__(self, message="Service unavailable, please try again later.", code=503, internal_message=''):
31
- super().__init__(message, code, internal_message)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_g.py DELETED
@@ -1,38 +0,0 @@
1
- _base_ = [
2
- '../../configs/_base_/models/upernet_uniformer.py',
3
- '../../configs/_base_/datasets/ade20k.py',
4
- '../../configs/_base_/default_runtime.py',
5
- '../../configs/_base_/schedules/schedule_160k.py'
6
- ]
7
- model = dict(
8
- backbone=dict(
9
- type='UniFormer',
10
- embed_dim=[64, 128, 320, 512],
11
- layers=[3, 4, 8, 3],
12
- head_dim=64,
13
- drop_path_rate=0.25,
14
- windows=False,
15
- hybrid=False,
16
- ),
17
- decode_head=dict(
18
- in_channels=[64, 128, 320, 512],
19
- num_classes=150
20
- ),
21
- auxiliary_head=dict(
22
- in_channels=320,
23
- num_classes=150
24
- ))
25
-
26
- # AdamW optimizer, no weight decay for position embedding & layer norm in backbone
27
- optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
28
- paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
29
- 'relative_position_bias_table': dict(decay_mult=0.),
30
- 'norm': dict(decay_mult=0.)}))
31
-
32
- lr_config = dict(_delete_=True, policy='poly',
33
- warmup='linear',
34
- warmup_iters=1500,
35
- warmup_ratio=1e-6,
36
- power=1.0, min_lr=0.0, by_epoch=False)
37
-
38
- data=dict(samples_per_gpu=2)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/gradio_scribble2image.py DELETED
@@ -1,92 +0,0 @@
1
- from share import *
2
- import config
3
-
4
- import cv2
5
- import einops
6
- import gradio as gr
7
- import numpy as np
8
- import torch
9
- import random
10
-
11
- from pytorch_lightning import seed_everything
12
- from annotator.util import resize_image, HWC3
13
- from cldm.model import create_model, load_state_dict
14
- from cldm.ddim_hacked import DDIMSampler
15
-
16
-
17
- model = create_model('./models/cldm_v15.yaml').cpu()
18
- model.load_state_dict(load_state_dict('./models/control_sd15_scribble.pth', location='cuda'))
19
- model = model.cuda()
20
- ddim_sampler = DDIMSampler(model)
21
-
22
-
23
- def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
24
- with torch.no_grad():
25
- img = resize_image(HWC3(input_image), image_resolution)
26
- H, W, C = img.shape
27
-
28
- detected_map = np.zeros_like(img, dtype=np.uint8)
29
- detected_map[np.min(img, axis=2) < 127] = 255
30
-
31
- control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
32
- control = torch.stack([control for _ in range(num_samples)], dim=0)
33
- control = einops.rearrange(control, 'b h w c -> b c h w').clone()
34
-
35
- if seed == -1:
36
- seed = random.randint(0, 65535)
37
- seed_everything(seed)
38
-
39
- if config.save_memory:
40
- model.low_vram_shift(is_diffusing=False)
41
-
42
- cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
43
- un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
44
- shape = (4, H // 8, W // 8)
45
-
46
- if config.save_memory:
47
- model.low_vram_shift(is_diffusing=True)
48
-
49
- model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
50
- samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
51
- shape, cond, verbose=False, eta=eta,
52
- unconditional_guidance_scale=scale,
53
- unconditional_conditioning=un_cond)
54
-
55
- if config.save_memory:
56
- model.low_vram_shift(is_diffusing=False)
57
-
58
- x_samples = model.decode_first_stage(samples)
59
- x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
60
-
61
- results = [x_samples[i] for i in range(num_samples)]
62
- return [255 - detected_map] + results
63
-
64
-
65
- block = gr.Blocks().queue()
66
- with block:
67
- with gr.Row():
68
- gr.Markdown("## Control Stable Diffusion with Scribble Maps")
69
- with gr.Row():
70
- with gr.Column():
71
- input_image = gr.Image(source='upload', type="numpy")
72
- prompt = gr.Textbox(label="Prompt")
73
- run_button = gr.Button(label="Run")
74
- with gr.Accordion("Advanced options", open=False):
75
- num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
76
- image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
77
- strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
78
- guess_mode = gr.Checkbox(label='Guess Mode', value=False)
79
- ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
80
- scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
81
- seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
82
- eta = gr.Number(label="eta (DDIM)", value=0.0)
83
- a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
84
- n_prompt = gr.Textbox(label="Negative Prompt",
85
- value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
86
- with gr.Column():
87
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
88
- ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
89
- run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
90
-
91
-
92
- block.launch(server_name='0.0.0.0')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ariharasudhan/YoloV5/utils/plots.py DELETED
@@ -1,575 +0,0 @@
1
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
- """
3
- Plotting utils
4
- """
5
-
6
- import contextlib
7
- import math
8
- import os
9
- from copy import copy
10
- from pathlib import Path
11
- from urllib.error import URLError
12
-
13
- import cv2
14
- import matplotlib
15
- import matplotlib.pyplot as plt
16
- import numpy as np
17
- import pandas as pd
18
- import seaborn as sn
19
- import torch
20
- from PIL import Image, ImageDraw, ImageFont
21
-
22
- from utils import TryExcept, threaded
23
- from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path,
24
- is_ascii, xywh2xyxy, xyxy2xywh)
25
- from utils.metrics import fitness
26
- from utils.segment.general import scale_image
27
-
28
- # Settings
29
- RANK = int(os.getenv('RANK', -1))
30
- matplotlib.rc('font', **{'size': 11})
31
- matplotlib.use('Agg') # for writing to files only
32
-
33
-
34
- class Colors:
35
- # Ultralytics color palette https://ultralytics.com/
36
- def __init__(self):
37
- # hex = matplotlib.colors.TABLEAU_COLORS.values()
38
- hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
39
- '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
40
- self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
41
- self.n = len(self.palette)
42
-
43
- def __call__(self, i, bgr=False):
44
- c = self.palette[int(i) % self.n]
45
- return (c[2], c[1], c[0]) if bgr else c
46
-
47
- @staticmethod
48
- def hex2rgb(h): # rgb order (PIL)
49
- return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
50
-
51
-
52
- colors = Colors() # create instance for 'from utils.plots import colors'
53
-
54
-
55
- def check_pil_font(font=FONT, size=10):
56
- # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
57
- font = Path(font)
58
- font = font if font.exists() else (CONFIG_DIR / font.name)
59
- try:
60
- return ImageFont.truetype(str(font) if font.exists() else font.name, size)
61
- except Exception: # download if missing
62
- try:
63
- check_font(font)
64
- return ImageFont.truetype(str(font), size)
65
- except TypeError:
66
- check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374
67
- except URLError: # not online
68
- return ImageFont.load_default()
69
-
70
-
71
- class Annotator:
72
- # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
73
- def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
74
- assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
75
- non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic
76
- self.pil = pil or non_ascii
77
- if self.pil: # use PIL
78
- self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
79
- self.draw = ImageDraw.Draw(self.im)
80
- self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font,
81
- size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
82
- else: # use cv2
83
- self.im = im
84
- self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
85
-
86
- def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
87
- # Add one xyxy box to image with label
88
- if self.pil or not is_ascii(label):
89
- self.draw.rectangle(box, width=self.lw, outline=color) # box
90
- if label:
91
- w, h = self.font.getsize(label) # text width, height
92
- outside = box[1] - h >= 0 # label fits outside box
93
- self.draw.rectangle(
94
- (box[0], box[1] - h if outside else box[1], box[0] + w + 1,
95
- box[1] + 1 if outside else box[1] + h + 1),
96
- fill=color,
97
- )
98
- # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0
99
- self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
100
- else: # cv2
101
- p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
102
- cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
103
- if label:
104
- tf = max(self.lw - 1, 1) # font thickness
105
- w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
106
- outside = p1[1] - h >= 3
107
- p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
108
- cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled
109
- cv2.putText(self.im,
110
- label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
111
- 0,
112
- self.lw / 3,
113
- txt_color,
114
- thickness=tf,
115
- lineType=cv2.LINE_AA)
116
-
117
- def masks(self, masks, colors, im_gpu=None, alpha=0.5):
118
- """Plot masks at once.
119
- Args:
120
- masks (tensor): predicted masks on cuda, shape: [n, h, w]
121
- colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n]
122
- im_gpu (tensor): image on the GPU, shape: [3, h, w], range: [0, 1]
123
- alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque
124
- """
125
- if self.pil:
126
- # convert to numpy first
127
- self.im = np.asarray(self.im).copy()
128
- if im_gpu is None:
129
- # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...)
130
- if len(masks) == 0:
131
- return
132
- if isinstance(masks, torch.Tensor):
133
- masks = torch.as_tensor(masks, dtype=torch.uint8)
134
- masks = masks.permute(1, 2, 0).contiguous()
135
- masks = masks.cpu().numpy()
136
- # masks = np.ascontiguousarray(masks.transpose(1, 2, 0))
137
- masks = scale_image(masks.shape[:2], masks, self.im.shape)
138
- masks = np.asarray(masks, dtype=np.float32)
139
- colors = np.asarray(colors, dtype=np.float32) # shape(n,3)
140
- s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together
141
- masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3)
142
- self.im[:] = masks * alpha + self.im * (1 - s * alpha)
143
- else:
144
- if len(masks) == 0:
145
- self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
146
- colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0
147
- colors = colors[:, None, None] # shape(n,1,1,3)
148
- masks = masks.unsqueeze(3) # shape(n,h,w,1)
149
- masks_color = masks * (colors * alpha) # shape(n,h,w,3)
150
-
151
- inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1)
152
- mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3)
153
-
154
- im_gpu = im_gpu.flip(dims=[0]) # flip channel
155
- im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3)
156
- im_gpu = im_gpu * inv_alph_masks[-1] + mcs
157
- im_mask = (im_gpu * 255).byte().cpu().numpy()
158
- self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape)
159
- if self.pil:
160
- # convert im back to PIL and update draw
161
- self.fromarray(self.im)
162
-
163
- def rectangle(self, xy, fill=None, outline=None, width=1):
164
- # Add rectangle to image (PIL-only)
165
- self.draw.rectangle(xy, fill, outline, width)
166
-
167
- def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
168
- # Add text to image (PIL-only)
169
- if anchor == 'bottom': # start y from font bottom
170
- w, h = self.font.getsize(text) # text width, height
171
- xy[1] += 1 - h
172
- self.draw.text(xy, text, fill=txt_color, font=self.font)
173
-
174
- def fromarray(self, im):
175
- # Update self.im from a numpy array
176
- self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
177
- self.draw = ImageDraw.Draw(self.im)
178
-
179
- def result(self):
180
- # Return annotated image as array
181
- return np.asarray(self.im)
182
-
183
-
184
- def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
185
- """
186
- x: Features to be visualized
187
- module_type: Module type
188
- stage: Module stage within model
189
- n: Maximum number of feature maps to plot
190
- save_dir: Directory to save results
191
- """
192
- if 'Detect' not in module_type:
193
- batch, channels, height, width = x.shape # batch, channels, height, width
194
- if height > 1 and width > 1:
195
- f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename
196
-
197
- blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
198
- n = min(n, channels) # number of plots
199
- fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # n/8 rows x 8 cols
200
- ax = ax.ravel()
201
- plt.subplots_adjust(wspace=0.05, hspace=0.05)
202
- for i in range(n):
203
- ax[i].imshow(blocks[i].squeeze()) # cmap='gray'
204
- ax[i].axis('off')
205
-
206
- LOGGER.info(f'Saving {f}... ({n}/{channels})')
207
- plt.savefig(f, dpi=300, bbox_inches='tight')
208
- plt.close()
209
- np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save
210
-
211
-
212
- def hist2d(x, y, n=100):
213
- # 2d histogram used in labels.png and evolve.png
214
- xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
215
- hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
216
- xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
217
- yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
218
- return np.log(hist[xidx, yidx])
219
-
220
-
221
- def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
222
- from scipy.signal import butter, filtfilt
223
-
224
- # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
225
- def butter_lowpass(cutoff, fs, order):
226
- nyq = 0.5 * fs
227
- normal_cutoff = cutoff / nyq
228
- return butter(order, normal_cutoff, btype='low', analog=False)
229
-
230
- b, a = butter_lowpass(cutoff, fs, order=order)
231
- return filtfilt(b, a, data) # forward-backward filter
232
-
233
-
234
- def output_to_target(output, max_det=300):
235
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting
236
- targets = []
237
- for i, o in enumerate(output):
238
- box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)
239
- j = torch.full((conf.shape[0], 1), i)
240
- targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))
241
- return torch.cat(targets, 0).numpy()
242
-
243
-
244
- @threaded
245
- def plot_images(images, targets, paths=None, fname='images.jpg', names=None):
246
- # Plot image grid with labels
247
- if isinstance(images, torch.Tensor):
248
- images = images.cpu().float().numpy()
249
- if isinstance(targets, torch.Tensor):
250
- targets = targets.cpu().numpy()
251
-
252
- max_size = 1920 # max image size
253
- max_subplots = 16 # max image subplots, i.e. 4x4
254
- bs, _, h, w = images.shape # batch size, _, height, width
255
- bs = min(bs, max_subplots) # limit plot images
256
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
257
- if np.max(images[0]) <= 1:
258
- images *= 255 # de-normalise (optional)
259
-
260
- # Build Image
261
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
262
- for i, im in enumerate(images):
263
- if i == max_subplots: # if last batch has fewer images than we expect
264
- break
265
- x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
266
- im = im.transpose(1, 2, 0)
267
- mosaic[y:y + h, x:x + w, :] = im
268
-
269
- # Resize (optional)
270
- scale = max_size / ns / max(h, w)
271
- if scale < 1:
272
- h = math.ceil(scale * h)
273
- w = math.ceil(scale * w)
274
- mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
275
-
276
- # Annotate
277
- fs = int((h + w) * ns * 0.01) # font size
278
- annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)
279
- for i in range(i + 1):
280
- x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
281
- annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
282
- if paths:
283
- annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
284
- if len(targets) > 0:
285
- ti = targets[targets[:, 0] == i] # image targets
286
- boxes = xywh2xyxy(ti[:, 2:6]).T
287
- classes = ti[:, 1].astype('int')
288
- labels = ti.shape[1] == 6 # labels if no conf column
289
- conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)
290
-
291
- if boxes.shape[1]:
292
- if boxes.max() <= 1.01: # if normalized with tolerance 0.01
293
- boxes[[0, 2]] *= w # scale to pixels
294
- boxes[[1, 3]] *= h
295
- elif scale < 1: # absolute coords need scale if image scales
296
- boxes *= scale
297
- boxes[[0, 2]] += x
298
- boxes[[1, 3]] += y
299
- for j, box in enumerate(boxes.T.tolist()):
300
- cls = classes[j]
301
- color = colors(cls)
302
- cls = names[cls] if names else cls
303
- if labels or conf[j] > 0.25: # 0.25 conf thresh
304
- label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
305
- annotator.box_label(box, label, color=color)
306
- annotator.im.save(fname) # save
307
-
308
-
309
- def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
310
- # Plot LR simulating training for full epochs
311
- optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
312
- y = []
313
- for _ in range(epochs):
314
- scheduler.step()
315
- y.append(optimizer.param_groups[0]['lr'])
316
- plt.plot(y, '.-', label='LR')
317
- plt.xlabel('epoch')
318
- plt.ylabel('LR')
319
- plt.grid()
320
- plt.xlim(0, epochs)
321
- plt.ylim(0)
322
- plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
323
- plt.close()
324
-
325
-
326
- def plot_val_txt(): # from utils.plots import *; plot_val()
327
- # Plot val.txt histograms
328
- x = np.loadtxt('val.txt', dtype=np.float32)
329
- box = xyxy2xywh(x[:, :4])
330
- cx, cy = box[:, 0], box[:, 1]
331
-
332
- fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
333
- ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
334
- ax.set_aspect('equal')
335
- plt.savefig('hist2d.png', dpi=300)
336
-
337
- fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
338
- ax[0].hist(cx, bins=600)
339
- ax[1].hist(cy, bins=600)
340
- plt.savefig('hist1d.png', dpi=200)
341
-
342
-
343
- def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
344
- # Plot targets.txt histograms
345
- x = np.loadtxt('targets.txt', dtype=np.float32).T
346
- s = ['x targets', 'y targets', 'width targets', 'height targets']
347
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
348
- ax = ax.ravel()
349
- for i in range(4):
350
- ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
351
- ax[i].legend()
352
- ax[i].set_title(s[i])
353
- plt.savefig('targets.jpg', dpi=200)
354
-
355
-
356
- def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()
357
- # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
358
- save_dir = Path(file).parent if file else Path(dir)
359
- plot2 = False # plot additional results
360
- if plot2:
361
- ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
362
-
363
- fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
364
- # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
365
- for f in sorted(save_dir.glob('study*.txt')):
366
- y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
367
- x = np.arange(y.shape[1]) if x is None else np.array(x)
368
- if plot2:
369
- s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
370
- for i in range(7):
371
- ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
372
- ax[i].set_title(s[i])
373
-
374
- j = y[3].argmax() + 1
375
- ax2.plot(y[5, 1:j],
376
- y[3, 1:j] * 1E2,
377
- '.-',
378
- linewidth=2,
379
- markersize=8,
380
- label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
381
-
382
- ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
383
- 'k.-',
384
- linewidth=2,
385
- markersize=8,
386
- alpha=.25,
387
- label='EfficientDet')
388
-
389
- ax2.grid(alpha=0.2)
390
- ax2.set_yticks(np.arange(20, 60, 5))
391
- ax2.set_xlim(0, 57)
392
- ax2.set_ylim(25, 55)
393
- ax2.set_xlabel('GPU Speed (ms/img)')
394
- ax2.set_ylabel('COCO AP val')
395
- ax2.legend(loc='lower right')
396
- f = save_dir / 'study.png'
397
- print(f'Saving {f}...')
398
- plt.savefig(f, dpi=300)
399
-
400
-
401
- @TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395
402
- def plot_labels(labels, names=(), save_dir=Path('')):
403
- # plot dataset labels
404
- LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
405
- c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
406
- nc = int(c.max() + 1) # number of classes
407
- x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
408
-
409
- # seaborn correlogram
410
- sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
411
- plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
412
- plt.close()
413
-
414
- # matplotlib labels
415
- matplotlib.use('svg') # faster
416
- ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
417
- y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
418
- with contextlib.suppress(Exception): # color histogram bars by class
419
- [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195
420
- ax[0].set_ylabel('instances')
421
- if 0 < len(names) < 30:
422
- ax[0].set_xticks(range(len(names)))
423
- ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10)
424
- else:
425
- ax[0].set_xlabel('classes')
426
- sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
427
- sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
428
-
429
- # rectangles
430
- labels[:, 1:3] = 0.5 # center
431
- labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
432
- img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
433
- for cls, *box in labels[:1000]:
434
- ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot
435
- ax[1].imshow(img)
436
- ax[1].axis('off')
437
-
438
- for a in [0, 1, 2, 3]:
439
- for s in ['top', 'right', 'left', 'bottom']:
440
- ax[a].spines[s].set_visible(False)
441
-
442
- plt.savefig(save_dir / 'labels.jpg', dpi=200)
443
- matplotlib.use('Agg')
444
- plt.close()
445
-
446
-
447
- def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')):
448
- # Show classification image grid with labels (optional) and predictions (optional)
449
- from utils.augmentations import denormalize
450
-
451
- names = names or [f'class{i}' for i in range(1000)]
452
- blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im),
453
- dim=0) # split the batch into individual images
454
- n = min(len(blocks), nmax) # number of plots
455
- m = min(8, round(n ** 0.5)) # 8 x 8 default
456
- fig, ax = plt.subplots(math.ceil(n / m), m) # n/m rows x m cols
457
- ax = ax.ravel() if m > 1 else [ax]
458
- # plt.subplots_adjust(wspace=0.05, hspace=0.05)
459
- for i in range(n):
460
- ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0))
461
- ax[i].axis('off')
462
- if labels is not None:
463
- s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '')
464
- ax[i].set_title(s, fontsize=8, verticalalignment='top')
465
- plt.savefig(f, dpi=300, bbox_inches='tight')
466
- plt.close()
467
- if verbose:
468
- LOGGER.info(f"Saving {f}")
469
- if labels is not None:
470
- LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax]))
471
- if pred is not None:
472
- LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax]))
473
- return f
474
-
475
-
476
- def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()
477
- # Plot evolve.csv hyp evolution results
478
- evolve_csv = Path(evolve_csv)
479
- data = pd.read_csv(evolve_csv)
480
- keys = [x.strip() for x in data.columns]
481
- x = data.values
482
- f = fitness(x)
483
- j = np.argmax(f) # max fitness index
484
- plt.figure(figsize=(10, 12), tight_layout=True)
485
- matplotlib.rc('font', **{'size': 8})
486
- print(f'Best results from row {j} of {evolve_csv}:')
487
- for i, k in enumerate(keys[7:]):
488
- v = x[:, 7 + i]
489
- mu = v[j] # best single result
490
- plt.subplot(6, 5, i + 1)
491
- plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
492
- plt.plot(mu, f.max(), 'k+', markersize=15)
493
- plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters
494
- if i % 5 != 0:
495
- plt.yticks([])
496
- print(f'{k:>15}: {mu:.3g}')
497
- f = evolve_csv.with_suffix('.png') # filename
498
- plt.savefig(f, dpi=200)
499
- plt.close()
500
- print(f'Saved {f}')
501
-
502
-
503
- def plot_results(file='path/to/results.csv', dir=''):
504
- # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
505
- save_dir = Path(file).parent if file else Path(dir)
506
- fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
507
- ax = ax.ravel()
508
- files = list(save_dir.glob('results*.csv'))
509
- assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
510
- for f in files:
511
- try:
512
- data = pd.read_csv(f)
513
- s = [x.strip() for x in data.columns]
514
- x = data.values[:, 0]
515
- for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
516
- y = data.values[:, j].astype('float')
517
- # y[y == 0] = np.nan # don't show zero values
518
- ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
519
- ax[i].set_title(s[j], fontsize=12)
520
- # if j in [8, 9, 10]: # share train and val loss y axes
521
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
522
- except Exception as e:
523
- LOGGER.info(f'Warning: Plotting error for {f}: {e}')
524
- ax[1].legend()
525
- fig.savefig(save_dir / 'results.png', dpi=200)
526
- plt.close()
527
-
528
-
529
- def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
530
- # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
531
- ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
532
- s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
533
- files = list(Path(save_dir).glob('frames*.txt'))
534
- for fi, f in enumerate(files):
535
- try:
536
- results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
537
- n = results.shape[1] # number of rows
538
- x = np.arange(start, min(stop, n) if stop else n)
539
- results = results[:, x]
540
- t = (results[0] - results[0].min()) # set t0=0s
541
- results[0] = x
542
- for i, a in enumerate(ax):
543
- if i < len(results):
544
- label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
545
- a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
546
- a.set_title(s[i])
547
- a.set_xlabel('time (s)')
548
- # if fi == len(files) - 1:
549
- # a.set_ylim(bottom=0)
550
- for side in ['top', 'right']:
551
- a.spines[side].set_visible(False)
552
- else:
553
- a.remove()
554
- except Exception as e:
555
- print(f'Warning: Plotting error for {f}; {e}')
556
- ax[1].legend()
557
- plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
558
-
559
-
560
- def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):
561
- # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
562
- xyxy = torch.tensor(xyxy).view(-1, 4)
563
- b = xyxy2xywh(xyxy) # boxes
564
- if square:
565
- b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
566
- b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
567
- xyxy = xywh2xyxy(b).long()
568
- clip_boxes(xyxy, im.shape)
569
- crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
570
- if save:
571
- file.parent.mkdir(parents=True, exist_ok=True) # make directory
572
- f = str(increment_path(file).with_suffix('.jpg'))
573
- # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue
574
- Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB
575
- return crop
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/namespaces.py DELETED
@@ -1,107 +0,0 @@
1
- import os
2
- from distutils import log
3
- import itertools
4
-
5
-
6
- flatten = itertools.chain.from_iterable
7
-
8
-
9
- class Installer:
10
-
11
- nspkg_ext = '-nspkg.pth'
12
-
13
- def install_namespaces(self):
14
- nsp = self._get_all_ns_packages()
15
- if not nsp:
16
- return
17
- filename, ext = os.path.splitext(self._get_target())
18
- filename += self.nspkg_ext
19
- self.outputs.append(filename)
20
- log.info("Installing %s", filename)
21
- lines = map(self._gen_nspkg_line, nsp)
22
-
23
- if self.dry_run:
24
- # always generate the lines, even in dry run
25
- list(lines)
26
- return
27
-
28
- with open(filename, 'wt') as f:
29
- f.writelines(lines)
30
-
31
- def uninstall_namespaces(self):
32
- filename, ext = os.path.splitext(self._get_target())
33
- filename += self.nspkg_ext
34
- if not os.path.exists(filename):
35
- return
36
- log.info("Removing %s", filename)
37
- os.remove(filename)
38
-
39
- def _get_target(self):
40
- return self.target
41
-
42
- _nspkg_tmpl = (
43
- "import sys, types, os",
44
- "has_mfs = sys.version_info > (3, 5)",
45
- "p = os.path.join(%(root)s, *%(pth)r)",
46
- "importlib = has_mfs and __import__('importlib.util')",
47
- "has_mfs and __import__('importlib.machinery')",
48
- (
49
- "m = has_mfs and "
50
- "sys.modules.setdefault(%(pkg)r, "
51
- "importlib.util.module_from_spec("
52
- "importlib.machinery.PathFinder.find_spec(%(pkg)r, "
53
- "[os.path.dirname(p)])))"
54
- ),
55
- (
56
- "m = m or "
57
- "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))"
58
- ),
59
- "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
60
- "(p not in mp) and mp.append(p)",
61
- )
62
- "lines for the namespace installer"
63
-
64
- _nspkg_tmpl_multi = (
65
- 'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
66
- )
67
- "additional line(s) when a parent package is indicated"
68
-
69
- def _get_root(self):
70
- return "sys._getframe(1).f_locals['sitedir']"
71
-
72
- def _gen_nspkg_line(self, pkg):
73
- pth = tuple(pkg.split('.'))
74
- root = self._get_root()
75
- tmpl_lines = self._nspkg_tmpl
76
- parent, sep, child = pkg.rpartition('.')
77
- if parent:
78
- tmpl_lines += self._nspkg_tmpl_multi
79
- return ';'.join(tmpl_lines) % locals() + '\n'
80
-
81
- def _get_all_ns_packages(self):
82
- """Return sorted list of all package namespaces"""
83
- pkgs = self.distribution.namespace_packages or []
84
- return sorted(flatten(map(self._pkg_names, pkgs)))
85
-
86
- @staticmethod
87
- def _pkg_names(pkg):
88
- """
89
- Given a namespace package, yield the components of that
90
- package.
91
-
92
- >>> names = Installer._pkg_names('a.b.c')
93
- >>> set(names) == set(['a', 'a.b', 'a.b.c'])
94
- True
95
- """
96
- parts = pkg.split('.')
97
- while parts:
98
- yield '.'.join(parts)
99
- parts.pop()
100
-
101
-
102
- class DevelopInstaller(Installer):
103
- def _get_root(self):
104
- return repr(str(self.egg_path))
105
-
106
- def _get_target(self):
107
- return self.egg_link
 
spaces/Audio-AGI/AudioSep/models/CLAP/training/distributed.py DELETED
@@ -1,150 +0,0 @@
1
- import os
2
-
3
- import torch
4
- import socket
5
-
6
- try:
7
- import horovod.torch as hvd
8
- except ImportError:
9
- hvd = None
10
-
11
-
12
- def is_global_master(args):
13
- return args.rank == 0
14
-
15
-
16
- def is_local_master(args):
17
- return args.local_rank == 0
18
-
19
-
20
- def is_master(args, local=False):
21
- return is_local_master(args) if local else is_global_master(args)
22
-
23
-
24
- def is_using_horovod():
25
- # NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set
26
- # Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required...
27
- ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]
28
- pmi_vars = ["PMI_RANK", "PMI_SIZE"]
29
- if all([var in os.environ for var in ompi_vars]) or all(
30
- [var in os.environ for var in pmi_vars]
31
- ):
32
- return True
33
- else:
34
- return False
35
-
36
-
37
- def is_using_distributed():
38
- if "WORLD_SIZE" in os.environ:
39
- return int(os.environ["WORLD_SIZE"]) > 1
40
- if "SLURM_NTASKS" in os.environ:
41
- return int(os.environ["SLURM_NTASKS"]) > 1
42
- return False
43
-
44
-
45
- def world_info_from_env():
46
- local_rank = 0
47
- for v in (
48
- "SLURM_LOCALID",
49
- "MPI_LOCALRANKID",
50
- "OMPI_COMM_WORLD_LOCAL_RANK",
51
- "LOCAL_RANK",
52
- ):
53
- if v in os.environ:
54
- local_rank = int(os.environ[v])
55
- break
56
- global_rank = 0
57
- for v in ("SLURM_PROCID", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "RANK"):
58
- if v in os.environ:
59
- global_rank = int(os.environ[v])
60
- break
61
- world_size = 1
62
- for v in ("SLURM_NTASKS", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "WORLD_SIZE"):
63
- if v in os.environ:
64
- world_size = int(os.environ[v])
65
- break
66
-
67
- return local_rank, global_rank, world_size
68
-
69
-
70
- def init_distributed_device(args):
71
- # Distributed training = training on more than one GPU.
72
- # Works in both single and multi-node scenarios.
73
- args.distributed = False
74
- args.world_size = 1
75
- args.rank = 0 # global rank
76
- args.local_rank = 0
77
- if args.horovod:
78
- assert hvd is not None, "Horovod is not installed"
79
- hvd.init()
80
- world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
81
- world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
82
- local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
83
- args.local_rank = local_rank
84
- args.rank = world_rank
85
- args.world_size = world_size
86
- # args.local_rank = int(hvd.local_rank())
87
- # args.rank = hvd.rank()
88
- # args.world_size = hvd.size()
89
- args.distributed = True
90
- os.environ["LOCAL_RANK"] = str(args.local_rank)
91
- os.environ["RANK"] = str(args.rank)
92
- os.environ["WORLD_SIZE"] = str(args.world_size)
93
- print(
94
- f"Distributed training: local_rank={args.local_rank}, "
95
- f"rank={args.rank}, world_size={args.world_size}, "
96
- f"hostname={socket.gethostname()}, pid={os.getpid()}"
97
- )
98
- elif is_using_distributed():
99
- if "SLURM_PROCID" in os.environ:
100
- # DDP via SLURM
101
- args.local_rank, args.rank, args.world_size = world_info_from_env()
102
- # SLURM var -> torch.distributed vars in case needed
103
- os.environ["LOCAL_RANK"] = str(args.local_rank)
104
- os.environ["RANK"] = str(args.rank)
105
- os.environ["WORLD_SIZE"] = str(args.world_size)
106
- torch.distributed.init_process_group(
107
- backend=args.dist_backend,
108
- init_method=args.dist_url,
109
- world_size=args.world_size,
110
- rank=args.rank,
111
- )
112
- elif "OMPI_COMM_WORLD_SIZE" in os.environ: # using Summit cluster
113
- world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
114
- world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
115
- local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
116
- args.local_rank = local_rank
117
- args.rank = world_rank
118
- args.world_size = world_size
119
- torch.distributed.init_process_group(
120
- backend=args.dist_backend,
121
- init_method=args.dist_url,
122
- world_size=args.world_size,
123
- rank=args.rank,
124
- )
125
- else:
126
- # DDP via torchrun, torch.distributed.launch
127
- args.local_rank, _, _ = world_info_from_env()
128
- torch.distributed.init_process_group(
129
- backend=args.dist_backend, init_method=args.dist_url
130
- )
131
- args.world_size = torch.distributed.get_world_size()
132
- args.rank = torch.distributed.get_rank()
133
- args.distributed = True
134
- print(
135
- f"Distributed training: local_rank={args.local_rank}, "
136
- f"rank={args.rank}, world_size={args.world_size}, "
137
- f"hostname={socket.gethostname()}, pid={os.getpid()}"
138
- )
139
-
140
- if torch.cuda.is_available():
141
- if args.distributed and not args.no_set_device_rank:
142
- device = "cuda:%d" % args.local_rank
143
- else:
144
- device = "cuda:0"
145
- torch.cuda.set_device(device)
146
- else:
147
- device = "cpu"
148
- args.device = device
149
- device = torch.device(device)
150
- return device
 
spaces/Bart92/RVC_HF/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py DELETED
@@ -1,90 +0,0 @@
1
- from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
2
- import pyworld
3
- import numpy as np
4
-
5
-
6
- class DioF0Predictor(F0Predictor):
7
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
8
- self.hop_length = hop_length
9
- self.f0_min = f0_min
10
- self.f0_max = f0_max
11
- self.sampling_rate = sampling_rate
12
-
13
- def interpolate_f0(self, f0):
14
- """
15
- Interpolate the F0 contour (fill in unvoiced frames).
16
- """
17
-
18
- data = np.reshape(f0, (f0.size, 1))
19
-
20
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
21
- vuv_vector[data > 0.0] = 1.0
22
- vuv_vector[data <= 0.0] = 0.0
23
-
24
- ip_data = data
25
-
26
- frame_number = data.size
27
- last_value = 0.0
28
- for i in range(frame_number):
29
- if data[i] <= 0.0:
30
- j = i + 1
31
- for j in range(i + 1, frame_number):
32
- if data[j] > 0.0:
33
- break
34
- if j < frame_number - 1:
35
- if last_value > 0.0:
36
- step = (data[j] - data[i - 1]) / float(j - i)
37
- for k in range(i, j):
38
- ip_data[k] = data[i - 1] + step * (k - i + 1)
39
- else:
40
- for k in range(i, j):
41
- ip_data[k] = data[j]
42
- else:
43
- for k in range(i, frame_number):
44
- ip_data[k] = last_value
45
- else:
46
- ip_data[i] = data[i] # this copy may be unnecessary
47
- last_value = data[i]
48
-
49
- return ip_data[:, 0], vuv_vector[:, 0]
50
-
51
- def resize_f0(self, x, target_len):
52
- source = np.array(x)
53
- source[source < 0.001] = np.nan
54
- target = np.interp(
55
- np.arange(0, len(source) * target_len, len(source)) / target_len,
56
- np.arange(0, len(source)),
57
- source,
58
- )
59
- res = np.nan_to_num(target)
60
- return res
61
-
62
- def compute_f0(self, wav, p_len=None):
63
- if p_len is None:
64
- p_len = wav.shape[0] // self.hop_length
65
- f0, t = pyworld.dio(
66
- wav.astype(np.double),
67
- fs=self.sampling_rate,
68
- f0_floor=self.f0_min,
69
- f0_ceil=self.f0_max,
70
- frame_period=1000 * self.hop_length / self.sampling_rate,
71
- )
72
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
73
- for index, pitch in enumerate(f0):
74
- f0[index] = round(pitch, 1)
75
- return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
76
-
77
- def compute_f0_uv(self, wav, p_len=None):
78
- if p_len is None:
79
- p_len = wav.shape[0] // self.hop_length
80
- f0, t = pyworld.dio(
81
- wav.astype(np.double),
82
- fs=self.sampling_rate,
83
- f0_floor=self.f0_min,
84
- f0_ceil=self.f0_max,
85
- frame_period=1000 * self.hop_length / self.sampling_rate,
86
- )
87
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
88
- for index, pitch in enumerate(f0):
89
- f0[index] = round(pitch, 1)
90
- return self.interpolate_f0(self.resize_f0(f0, p_len))
 
spaces/Benson/text-generation/Examples/Cmo Descargar Gratis Fuego Mx En El Ordenador Porttil Sin Bluestacks.md DELETED
@@ -1,39 +0,0 @@
1
-
2
- <h1>How to Download Free Fire Max on a Laptop Without Bluestacks</h1>
- <p>Free Fire Max is a popular battle royale game that offers a premium gaming experience with ultra HD graphics and impressive effects. The game is designed for mobile devices, but some players may want to enjoy it on a bigger screen with better controls. However, not everyone has a powerful laptop that can run Android emulators like Bluestacks smoothly. If you are one of them, don't worry, there are other ways to play Free Fire Max on a laptop without Bluestacks. In this article, we will show you two methods you can try to download and play Free Fire Max on a laptop without Bluestacks. </p>
- <h2>Method 1: Using the Gameloop emulator</h2>
- <p>Gameloop is an Android emulator developed by Tencent, the same company that created Free Fire Max. It is optimized for gaming and supports several titles, including Free Fire Max. Here are the steps to use Gameloop to play Free Fire Max on a laptop without Bluestacks:</p>
- <h2>how to download free fire max on a laptop without bluestacks</h2><br /><p><b><b>Download Zip</b> &harr; <a href="https://bltlly.com/2v6JdO">https://bltlly.com/2v6JdO</a></b></p><br /><br />
- <ol>
- <li><b>Download Gameloop</b> from the <a href="( 1 )">official website</a>. </li>
- <li><b>Install and run Gameloop</b> on your laptop. You may need to allow some permissions and accept some terms and conditions. </li>
- <li><b>Search for Free Fire Max</b> in the game tab and click the download button. </li>
- <li><b>Wait for the download and installation</b> to complete and launch the game from the home screen. </li>
- </ol>
- <p>You can now play Free Fire Max on your laptop using the Gameloop emulator. You can customize the settings, controls, and graphics to your preferences. You can also use the keyboard and mouse to play the game more comfortably. </p>
- <h2>Method 2: Using Prime OS</h2>
15
-
16
- <ol>
- <li><b>Download Prime OS</b> from its <a href="( 2 )">official website</a>. Choose the version that suits your laptop's specifications. </li>
- <li><b>Install Prime OS</b> on your system's hard drive. You can use a USB flash drive or a DVD to create bootable media. Follow the instructions on the website to complete the installation process. </li>
- <li><b>Restart your system</b> and select Prime OS from the boot menu. The first boot may take some time to configure the settings. </li>
- <li><b>Sign in to your Google Play account</b> and install Free Fire Max from the Play Store app. You can also download it from other sources if you prefer. </li>
- </ol>
- <p>You can now play Free Fire Max on your laptop using Prime OS. You can switch between Windows and Android at any time by restarting your system. You can also enjoy other Prime OS features, such as AWD Launcher, root access, and multi-window support. </p>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to download Free Fire Max on a laptop without Bluestacks using two methods: the Gameloop emulator and Prime OS. Both methods have their own advantages and disadvantages, so you can choose the one that suits your needs and preferences. Playing Free Fire Max on a laptop without Bluestacks can give you a better gaming experience with higher graphics quality, faster performance, and easier controls. However, you should also be aware of the potential risks and challenges, such as compatibility issues, security threats, and system requirements. We hope this article has helped you learn how to download Free Fire Max on a laptop without Bluestacks. If you have any questions or comments, feel free to leave a comment below. </p>
- <h2>Frequently asked questions</h2>
- <p>Here are some frequently asked questions related to the topic of how to download Free Fire Max on a laptop without Bluestacks:</p>
27
- <h3> ¿Es compatible con Windows 10? </h3>
28
-
29
- <h3>Is Free Fire Max better than Free Fire?</h3>
- <p>Free Fire Max is an enhanced version of Free Fire that offers improved graphics, effects, and features. It also has a bigger map, more weapons, and more game modes. However, Free Fire Max also requires more storage space, RAM, and CPU than Free Fire. Therefore, it may not run smoothly on low-end devices. </p>
- <h3>Can I play Free Fire Max with Free Fire players?</h3>
- <p>Yes, you can play Free Fire Max with Free Fire players, since both games share the same server and account system. You can also transfer your progress and data from Free Fire to Free Fire Max without any hassle. </p>
- <h3>What are the minimum system requirements to play Free Fire Max on a laptop?</h3>
- <p>The minimum system requirements to play Free Fire Max on a laptop vary depending on the method you use. For the Gameloop emulator, you need at least 4 GB of RAM, 4 GB of disk space, and a dual-core processor. For Prime OS, you need at least 2 GB of RAM, 16 GB of disk space, and a 64-bit processor. </p>
- <h3>Is it safe to play Free Fire Max on a laptop without Bluestacks?</h3>
- <p>Playing Free Fire Max on a laptop without Bluestacks is generally safe, as long as you use a reliable and trustworthy method. However, you should also be careful about the sources you download from, the permissions you grant, and the antivirus software you use. You should also avoid using hacks or cheats that could compromise your account or device. </p>
37
- <p></p> 64aa2da5cf<br />
38
- <br />
39
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Bluecurve Home App.md DELETED
@@ -1,76 +0,0 @@
1
- <br />
2
- <h1>How to Download and Use the BlueCurve Home App</h1>
- <p>Do you want a better WiFi experience at home? Do you want to manage your home network and connected devices with ease? Do you want to enjoy the benefits of smart home integration? If you answered yes to any of these questions, then you need the BlueCurve Home app. </p>
- <h2>download bluecurve home app</h2><br /><p><b><b>Download File</b> &#9658; <a href="https://bltlly.com/2v6Ldi">https://bltlly.com/2v6Ldi</a></b></p><br /><br />
- <p>BlueCurve Home is a digital dashboard that lets you set up, monitor, and control your home WiFi network from anywhere with your connected device. It is available for free to Shaw Internet customers who have a Fibre+ Gateway. In this article, we will show you how to download and use the app, as well as its features, benefits, and troubleshooting tips. </p>
- <h2>How to download the app for Android and iOS devices</h2>
- <p>Downloading the BlueCurve Home app is easy. Just follow these steps:</p>
- <ol>
- <li>Go to the <a href="( 1 )">Google Play Store</a> or the <a href="( 3 )">Apple App Store</a> on your device. </li>
- <li>Search for "Shaw BlueCurve Home" and tap the app icon. </li>
- <li>Tap Install or Get to download the app for free. </li>
- <li>Wait for the app to finish downloading and installing on your device. </li>
- </ol>
- <h2>How to sign in with your Shaw ID and password</h2>
- <p>To use the BlueCurve Home app, you need to sign in with your Shaw ID username and password. If you don't have a Shaw ID yet, you can create one at <a href="https://register.shaw.ca/">https://register.shaw.ca/</a>. Once you have your Shaw ID, follow these steps:</p>
- <ol>
- <li>Open the BlueCurve Home app on your device. </li>
- <li>Enter your Shaw username and password in the fields provided. </li>
- <li>Tap Sign In to access the app. </li>
- </ol>
21
-
22
- <h2>Features of BlueCurve Home</h2>
- <p>The BlueCurve Home app has several features that help you personalize, control, and protect your home network. Here are some of them:</p>
24
- <p></p>
25
- <h3>WiFi setup and personalization</h3>
- <p>With this feature, you can:</p>
- <ul>
- <li> See devices connected to your home network and give them nicknames for easy reference. </li>
- <li>Create profiles so you can assign devices to your family members. </li>
- <li> Easily view and change your WiFi network name and password. </li>
- <li>Share your WiFi with guests or family members using a QR code or a link. </li>
- <li> View and manage all past network notifications from the notification centre. </li>
- </ul>
- <h3>Parental controls</h3>
- <p>With this feature, you can:</p>
- <ul>
- <li>Set up parental controls to block inappropriate content and websites for your children. </li>
- <li>Pause Internet access for specific devices or profiles with the tap of a button. </li>
- <li>Monitor and manage your family members' active time and data usage. </li>
- <li>Protect your network from malware, phishing, and other online threats with Shaw Secure from McAfee.</li>
- </ul>
- <h3>Troubleshooting</h3>
- <p>With this feature, you can:</p>
- <ul>
- <li>Run a speed test to check your Internet performance and compare it with your plan. </li>
- <li>Restart your gateway remotely if you experience any connectivity issues. </li>
- <li>Find tips and solutions for common WiFi problems in the app's help section. </li>
- <li>Contact Shaw support directly from the app if you need more help. </li>
- </ul>
- <h2>Benefits of BlueCurve Home</h2>
- <p>The BlueCurve Home app not only makes your WiFi network easier to manage, it also improves your WiFi experience in many ways. Here are some of the benefits of using the app:</p>
52
- <h3>Experiencia WiFi mejorada</h3>
53
-
54
- <h3>Smart home integration</h3>
- <p>The BlueCurve Home app also works with other smart home devices that add to your comfort, convenience, and entertainment. For example, you can use the app to connect and manage BlueCurve Pods, which are small devices that extend your WiFi coverage to hard-to-reach areas of your home. You can also use the app to integrate BlueCurve TV, Shaw's best TV service, which lets you watch live and on-demand content on any screen. In addition, you can use the app to control other smart home devices, such as lights, thermostats, cameras, and more, with voice commands or gestures. </p>
- <h3>Customer support</h3>
- <p>If you ever need help with BlueCurve Home or any other Shaw service, you can count on their customer support team to assist you. You can contact them by phone, chat, email, or social media. You can also use the app to schedule a callback or a service appointment at your convenience. Shaw's customer support team is available 24/7 to answer your questions and resolve your issues. </p>
- <h2>Conclusion</h2>
- <p>BlueCurve Home is a must-have app for Shaw Internet customers who want more control and flexibility over their home network and connected devices. It lets you set up, monitor, and personalize your WiFi network from anywhere with your device. It also offers features such as parental controls, troubleshooting, and smart home integration that improve your WiFi experience and make your life easier. To download the app for free, visit the Google Play Store or the Apple App Store today. </p>
- <h2>Frequently asked questions</h2>
- <p>Here are some frequently asked questions and answers about BlueCurve Home:</p>
- <h4>Q: Do I need to pay extra to use BlueCurve Home?</h4>
- <p>A: No, BlueCurve Home is included with your Shaw Internet service at no additional cost. However, you need to have a Fibre+ Gateway modem to use the app. </p>
- <h4>Q: How many devices can I connect to my BlueCurve Home network?</h4>
65
-
66
- <h4>Q: How do I update the BlueCurve Home app?</h4>
- <p>A: The BlueCurve Home app updates automatically whenever a new version is available. You can also check for updates manually by going to your device's app store and tapping Update if one is available. </p>
- <h4>Q: How do I remove the BlueCurve Home app?</h4>
- <p>A: If you want to remove the BlueCurve Home app from your device, you can follow these steps:</p>
- <ol>
- <li>On Android devices, tap and hold the app icon until a menu appears. Then tap Uninstall and confirm. </li>
- <li>On iOS devices, tap and hold the app icon until it starts to wiggle. Then tap the X icon in the top-left corner of the app icon and confirm. </li>
- </ol> <h4>Q: How do I access the BlueCurve Home app on my computer?</h4>
- <p>A: You can access the BlueCurve Home app on your computer by going to <a href="https://home.shaw.ca/">https://home.shaw.ca/</a> and signing in with your Shaw ID and password. You can use any web browser that supports HTML5, such as Chrome, Firefox, Safari, or Edge.</p> 64aa2da5cf<br />
75
- <br />
76
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py DELETED
@@ -1,23 +0,0 @@
1
- # Automatically generated by scripts/gen_mapfiles.py.
2
- # DO NOT EDIT BY HAND; run `make mapfiles` instead.
3
-
4
- FORMATTERS = {
5
- 'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
6
- 'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
7
- 'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
8
- 'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'),
9
- 'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
10
- 'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
11
- 'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
12
- 'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
13
- 'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
14
- 'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
15
- 'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'),
16
- 'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
17
- 'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
18
- 'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
19
- 'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
20
- 'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
21
- 'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
22
- 'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'),
23
- }
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/common.py DELETED
@@ -1,424 +0,0 @@
1
- # common.py
2
- from .core import *
3
- from .helpers import delimited_list, any_open_tag, any_close_tag
4
- from datetime import datetime
5
-
6
-
7
- # some other useful expressions - using lower-case class name since we are really using this as a namespace
8
- class pyparsing_common:
9
- """Here are some common low-level expressions that may be useful in
10
- jump-starting parser development:
11
-
12
- - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
13
- :class:`scientific notation<sci_real>`)
14
- - common :class:`programming identifiers<identifier>`
15
- - network addresses (:class:`MAC<mac_address>`,
16
- :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
17
- - ISO8601 :class:`dates<iso8601_date>` and
18
- :class:`datetime<iso8601_datetime>`
19
- - :class:`UUID<uuid>`
20
- - :class:`comma-separated list<comma_separated_list>`
21
- - :class:`url`
22
-
23
- Parse actions:
24
-
25
- - :class:`convertToInteger`
26
- - :class:`convertToFloat`
27
- - :class:`convertToDate`
28
- - :class:`convertToDatetime`
29
- - :class:`stripHTMLTags`
30
- - :class:`upcaseTokens`
31
- - :class:`downcaseTokens`
32
-
33
- Example::
34
-
35
- pyparsing_common.number.runTests('''
36
- # any int or real number, returned as the appropriate type
37
- 100
38
- -100
39
- +100
40
- 3.14159
41
- 6.02e23
42
- 1e-12
43
- ''')
44
-
45
- pyparsing_common.fnumber.runTests('''
46
- # any int or real number, returned as float
47
- 100
48
- -100
49
- +100
50
- 3.14159
51
- 6.02e23
52
- 1e-12
53
- ''')
54
-
55
- pyparsing_common.hex_integer.runTests('''
56
- # hex numbers
57
- 100
58
- FF
59
- ''')
60
-
61
- pyparsing_common.fraction.runTests('''
62
- # fractions
63
- 1/2
64
- -3/4
65
- ''')
66
-
67
- pyparsing_common.mixed_integer.runTests('''
68
- # mixed fractions
69
- 1
70
- 1/2
71
- -3/4
72
- 1-3/4
73
- ''')
74
-
75
- import uuid
76
- pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
77
- pyparsing_common.uuid.runTests('''
78
- # uuid
79
- 12345678-1234-5678-1234-567812345678
80
- ''')
81
-
82
- prints::
83
-
84
- # any int or real number, returned as the appropriate type
85
- 100
86
- [100]
87
-
88
- -100
89
- [-100]
90
-
91
- +100
92
- [100]
93
-
94
- 3.14159
95
- [3.14159]
96
-
97
- 6.02e23
98
- [6.02e+23]
99
-
100
- 1e-12
101
- [1e-12]
102
-
103
- # any int or real number, returned as float
104
- 100
105
- [100.0]
106
-
107
- -100
108
- [-100.0]
109
-
110
- +100
111
- [100.0]
112
-
113
- 3.14159
114
- [3.14159]
115
-
116
- 6.02e23
117
- [6.02e+23]
118
-
119
- 1e-12
120
- [1e-12]
121
-
122
- # hex numbers
123
- 100
124
- [256]
125
-
126
- FF
127
- [255]
128
-
129
- # fractions
130
- 1/2
131
- [0.5]
132
-
133
- -3/4
134
- [-0.75]
135
-
136
- # mixed fractions
137
- 1
138
- [1]
139
-
140
- 1/2
141
- [0.5]
142
-
143
- -3/4
144
- [-0.75]
145
-
146
- 1-3/4
147
- [1.75]
148
-
149
- # uuid
150
- 12345678-1234-5678-1234-567812345678
151
- [UUID('12345678-1234-5678-1234-567812345678')]
152
- """
153
-
154
- convert_to_integer = token_map(int)
155
- """
156
- Parse action for converting parsed integers to Python int
157
- """
158
-
159
- convert_to_float = token_map(float)
160
- """
161
- Parse action for converting parsed numbers to Python float
162
- """
163
-
164
- integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
165
- """expression that parses an unsigned integer, returns an int"""
166
-
167
- hex_integer = (
168
- Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
169
- )
170
- """expression that parses a hexadecimal integer, returns an int"""
171
-
172
- signed_integer = (
173
- Regex(r"[+-]?\d+")
174
- .set_name("signed integer")
175
- .set_parse_action(convert_to_integer)
176
- )
177
- """expression that parses an integer with optional leading sign, returns an int"""
178
-
179
- fraction = (
180
- signed_integer().set_parse_action(convert_to_float)
181
- + "/"
182
- + signed_integer().set_parse_action(convert_to_float)
183
- ).set_name("fraction")
184
- """fractional expression of an integer divided by an integer, returns a float"""
185
- fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
186
-
187
- mixed_integer = (
188
- fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
189
- ).set_name("fraction or mixed integer-fraction")
190
- """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
191
- mixed_integer.add_parse_action(sum)
192
-
193
- real = (
194
- Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
195
- .set_name("real number")
196
- .set_parse_action(convert_to_float)
197
- )
198
- """expression that parses a floating point number and returns a float"""
199
-
200
- sci_real = (
201
- Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
202
- .set_name("real number with scientific notation")
203
- .set_parse_action(convert_to_float)
204
- )
205
- """expression that parses a floating point number with optional
206
- scientific notation and returns a float"""
207
-
208
- # streamlining this expression makes the docs nicer-looking
209
- number = (sci_real | real | signed_integer).setName("number").streamline()
210
- """any numeric expression, returns the corresponding Python type"""
211
-
212
- fnumber = (
213
- Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
214
- .set_name("fnumber")
215
- .set_parse_action(convert_to_float)
216
- )
217
- """any int or real number, returned as float"""
218
-
219
- identifier = Word(identchars, identbodychars).set_name("identifier")
220
- """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
221
-
222
- ipv4_address = Regex(
223
- r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
224
- ).set_name("IPv4 address")
225
- "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
226
-
227
- _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
228
- _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
229
- "full IPv6 address"
230
- )
231
- _short_ipv6_address = (
232
- Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
233
- + "::"
234
- + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
235
- ).set_name("short IPv6 address")
236
- _short_ipv6_address.add_condition(
237
- lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
238
- )
239
- _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
240
- ipv6_address = Combine(
241
- (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
242
- "IPv6 address"
243
- )
244
- ).set_name("IPv6 address")
245
- "IPv6 address (long, short, or mixed form)"
246
-
247
- mac_address = Regex(
248
- r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
249
- ).set_name("MAC address")
250
- "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
251
-
252
- @staticmethod
253
- def convert_to_date(fmt: str = "%Y-%m-%d"):
254
- """
255
- Helper to create a parse action for converting parsed date string to Python datetime.date
256
-
257
- Params -
258
- - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
259
-
260
- Example::
261
-
262
- date_expr = pyparsing_common.iso8601_date.copy()
263
- date_expr.setParseAction(pyparsing_common.convertToDate())
264
- print(date_expr.parseString("1999-12-31"))
265
-
266
- prints::
267
-
268
- [datetime.date(1999, 12, 31)]
269
- """
270
-
271
- def cvt_fn(ss, ll, tt):
272
- try:
273
- return datetime.strptime(tt[0], fmt).date()
274
- except ValueError as ve:
275
- raise ParseException(ss, ll, str(ve))
276
-
277
- return cvt_fn
278
-
279
- @staticmethod
280
- def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
281
- """Helper to create a parse action for converting parsed
282
- datetime string to Python datetime.datetime
283
-
284
- Params -
285
- - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
286
-
287
- Example::
288
-
289
- dt_expr = pyparsing_common.iso8601_datetime.copy()
290
- dt_expr.setParseAction(pyparsing_common.convertToDatetime())
291
- print(dt_expr.parseString("1999-12-31T23:59:59.999"))
292
-
293
- prints::
294
-
295
- [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
296
- """
297
-
298
- def cvt_fn(s, l, t):
299
- try:
300
- return datetime.strptime(t[0], fmt)
301
- except ValueError as ve:
302
- raise ParseException(s, l, str(ve))
303
-
304
- return cvt_fn
305
-
306
- iso8601_date = Regex(
307
- r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
308
- ).set_name("ISO8601 date")
309
- "ISO8601 date (``yyyy-mm-dd``)"
310
-
311
- iso8601_datetime = Regex(
312
- r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
313
- ).set_name("ISO8601 datetime")
314
- "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
315
-
316
- uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
317
- "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
318
-
319
- _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
320
-
321
- @staticmethod
322
- def strip_html_tags(s: str, l: int, tokens: ParseResults):
323
- """Parse action to remove HTML tags from web page HTML source
324
-
325
- Example::
326
-
327
- # strip HTML links from normal text
328
- text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
329
- td, td_end = makeHTMLTags("TD")
330
- table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
331
- print(table_text.parseString(text).body)
332
-
333
- Prints::
334
-
335
- More info at the pyparsing wiki page
336
- """
337
- return pyparsing_common._html_stripper.transform_string(tokens[0])
338
-
339
- _commasepitem = (
340
- Combine(
341
- OneOrMore(
342
- ~Literal(",")
343
- + ~LineEnd()
344
- + Word(printables, exclude_chars=",")
345
- + Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
346
- )
347
- )
348
- .streamline()
349
- .set_name("commaItem")
350
- )
351
- comma_separated_list = delimited_list(
352
- Opt(quoted_string.copy() | _commasepitem, default="")
353
- ).set_name("comma separated list")
354
- """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
355
-
356
- upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
357
- """Parse action to convert tokens to upper case."""
358
-
359
- downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
360
- """Parse action to convert tokens to lower case."""
361
-
362
- # fmt: off
363
- url = Regex(
364
- # https://mathiasbynens.be/demo/url-regex
365
- # https://gist.github.com/dperini/729294
366
- r"^" +
367
- # protocol identifier (optional)
368
- # short syntax // still required
369
- r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
370
- # user:pass BasicAuth (optional)
371
- r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
372
- r"(?P<host>" +
373
- # IP address exclusion
374
- # private & local networks
375
- r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
376
- r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
377
- r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
378
- # IP address dotted notation octets
379
- # excludes loopback network 0.0.0.0
380
- # excludes reserved space >= 224.0.0.0
381
- # excludes network & broadcast addresses
382
- # (first & last IP address of each class)
383
- r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
384
- r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
385
- r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
386
- r"|" +
387
- # host & domain names, may end with dot
388
- # can be replaced by a shortest alternative
389
- # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
390
- r"(?:" +
391
- r"(?:" +
392
- r"[a-z0-9\u00a1-\uffff]" +
393
- r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
394
- r")?" +
395
- r"[a-z0-9\u00a1-\uffff]\." +
396
- r")+" +
397
- # TLD identifier name, may end with dot
398
- r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
399
- r")" +
400
- # port number (optional)
401
- r"(:(?P<port>\d{2,5}))?" +
402
- # resource path (optional)
403
- r"(?P<path>\/[^?# ]*)?" +
404
- # query string (optional)
405
- r"(\?(?P<query>[^#]*))?" +
406
- # fragment (optional)
407
- r"(#(?P<fragment>\S*))?" +
408
- r"$"
409
- ).set_name("url")
410
- # fmt: on
411
-
412
- # pre-PEP8 compatibility names
413
- convertToInteger = convert_to_integer
414
- convertToFloat = convert_to_float
415
- convertToDate = convert_to_date
416
- convertToDatetime = convert_to_datetime
417
- stripHTMLTags = strip_html_tags
418
- upcaseTokens = upcase_tokens
419
- downcaseTokens = downcase_tokens
420
-
421
-
422
- _builtin_exprs = [
423
- v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
424
- ]
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/reduce_by_key.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits reduce_by_key
22
- #include <thrust/system/detail/sequential/reduce_by_key.h>
23
-
 
spaces/CVPR/WALT/mmdet/datasets/coco.py DELETED
@@ -1,548 +0,0 @@
1
- import itertools
2
- import logging
3
- import os.path as osp
4
- import tempfile
5
- from collections import OrderedDict
6
-
7
- import mmcv
8
- import numpy as np
9
- import pycocotools
10
- from mmcv.utils import print_log
11
- from pycocotools.coco import COCO
12
- from pycocotools.cocoeval import COCOeval
13
- from terminaltables import AsciiTable
14
-
15
- from mmdet.core import eval_recalls
16
- from .builder import DATASETS
17
- from .custom import CustomDataset
18
-
19
-
20
- @DATASETS.register_module()
21
- class CocoDataset(CustomDataset):
22
-
23
- CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
24
- 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
25
- 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
26
- 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
27
- 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
28
- 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
29
- 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
30
- 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
31
- 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
32
- 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
33
- 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
34
- 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
35
- 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
36
- 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
37
-
38
- def load_annotations(self, ann_file):
39
- """Load annotation from COCO style annotation file.
40
-
41
- Args:
42
- ann_file (str): Path of annotation file.
43
-
44
- Returns:
45
- list[dict]: Annotation info from COCO api.
46
- """
47
- if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
48
- raise AssertionError(
49
- 'Incompatible version of pycocotools is installed. '
50
- 'Run pip uninstall pycocotools first. Then run pip '
51
- 'install mmpycocotools to install open-mmlab forked '
52
- 'pycocotools.')
53
-
54
- self.coco = COCO(ann_file)
55
- self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
56
- self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
57
- self.img_ids = self.coco.get_img_ids()
58
- data_infos = []
59
- total_ann_ids = []
60
- for i in self.img_ids:
61
- info = self.coco.load_imgs([i])[0]
62
- info['filename'] = info['file_name']
63
- data_infos.append(info)
64
- ann_ids = self.coco.get_ann_ids(img_ids=[i])
65
- total_ann_ids.extend(ann_ids)
66
- assert len(set(total_ann_ids)) == len(
67
- total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
68
- return data_infos
69
-
70
- def get_ann_info(self, idx):
71
- """Get COCO annotation by index.
72
-
73
- Args:
74
- idx (int): Index of data.
75
-
76
- Returns:
77
- dict: Annotation info of specified index.
78
- """
79
-
80
- img_id = self.data_infos[idx]['id']
81
- ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
82
- ann_info = self.coco.load_anns(ann_ids)
83
- return self._parse_ann_info(self.data_infos[idx], ann_info)
84
-
85
- def get_cat_ids(self, idx):
86
- """Get COCO category ids by index.
87
-
88
- Args:
89
- idx (int): Index of data.
90
-
91
- Returns:
92
- list[int]: All categories in the image of specified index.
93
- """
94
-
95
- img_id = self.data_infos[idx]['id']
96
- ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
97
- ann_info = self.coco.load_anns(ann_ids)
98
- return [ann['category_id'] for ann in ann_info]
99
-
100
- def _filter_imgs(self, min_size=32):
101
- """Filter images too small or without ground truths."""
102
- valid_inds = []
103
- # obtain images that contain annotation
104
- ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
105
- # obtain images that contain annotations of the required categories
106
- ids_in_cat = set()
107
- for i, class_id in enumerate(self.cat_ids):
108
- ids_in_cat |= set(self.coco.cat_img_map[class_id])
109
- # merge the image id sets of the two conditions and use the merged set
110
- # to filter out images if self.filter_empty_gt=True
111
- ids_in_cat &= ids_with_ann
112
-
113
- valid_img_ids = []
114
- for i, img_info in enumerate(self.data_infos):
115
- img_id = self.img_ids[i]
116
- if self.filter_empty_gt and img_id not in ids_in_cat:
117
- continue
118
- if min(img_info['width'], img_info['height']) >= min_size:
119
- valid_inds.append(i)
120
- valid_img_ids.append(img_id)
121
- self.img_ids = valid_img_ids
122
- return valid_inds
123
-
124
- def _parse_ann_info(self, img_info, ann_info):
125
- """Parse bbox and mask annotation.
126
-
127
- Args:
128
- ann_info (list[dict]): Annotation info of an image.
129
- with_mask (bool): Whether to parse mask annotations.
130
-
131
- Returns:
132
- dict: A dict containing the following keys: bboxes, bboxes_ignore,\
133
- labels, masks, seg_map. "masks" are raw annotations and not \
134
- decoded into binary masks.
135
- """
136
- gt_bboxes = []
137
- gt_labels = []
138
- gt_bboxes_ignore = []
139
- gt_masks_ann = []
140
- for i, ann in enumerate(ann_info):
141
- if ann.get('ignore', False):
142
- continue
143
- x1, y1, w, h = ann['bbox']
144
- inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
145
- inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
146
- if inter_w * inter_h == 0:
147
- continue
148
- if ann['area'] <= 0 or w < 1 or h < 1:
149
- continue
150
- if ann['category_id'] not in self.cat_ids:
151
- continue
152
- bbox = [x1, y1, x1 + w, y1 + h]
153
- if ann.get('iscrowd', False):
154
- gt_bboxes_ignore.append(bbox)
155
- else:
156
- gt_bboxes.append(bbox)
157
- gt_labels.append(self.cat2label[ann['category_id']])
158
- gt_masks_ann.append(ann.get('segmentation', None))
159
-
160
- if gt_bboxes:
161
- gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
162
- gt_labels = np.array(gt_labels, dtype=np.int64)
163
- else:
164
- gt_bboxes = np.zeros((0, 4), dtype=np.float32)
165
- gt_labels = np.array([], dtype=np.int64)
166
-
167
- if gt_bboxes_ignore:
168
- gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
169
- else:
170
- gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
171
-
172
- seg_map = img_info['filename'].replace('jpg', 'png')
173
-
174
- ann = dict(
175
- bboxes=gt_bboxes,
176
- labels=gt_labels,
177
- bboxes_ignore=gt_bboxes_ignore,
178
- masks=gt_masks_ann,
179
- seg_map=seg_map)
180
-
181
- return ann
182
-
183
- def xyxy2xywh(self, bbox):
184
- """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
185
- evaluation.
186
-
187
- Args:
188
- bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
189
- ``xyxy`` order.
190
-
191
- Returns:
192
- list[float]: The converted bounding boxes, in ``xywh`` order.
193
- """
194
-
195
- _bbox = bbox.tolist()
196
- return [
197
- _bbox[0],
198
- _bbox[1],
199
- _bbox[2] - _bbox[0],
200
- _bbox[3] - _bbox[1],
201
- ]
202
-
203
- def _proposal2json(self, results):
204
- """Convert proposal results to COCO json style."""
205
- json_results = []
206
- for idx in range(len(self)):
207
- img_id = self.img_ids[idx]
208
- bboxes = results[idx]
209
- for i in range(bboxes.shape[0]):
210
- data = dict()
211
- data['image_id'] = img_id
212
- data['bbox'] = self.xyxy2xywh(bboxes[i])
213
- data['score'] = float(bboxes[i][4])
214
- data['category_id'] = 1
215
- json_results.append(data)
216
- return json_results
217
-
218
- def _det2json(self, results):
219
- """Convert detection results to COCO json style."""
220
- json_results = []
221
- for idx in range(len(self)):
222
- img_id = self.img_ids[idx]
223
- result = results[idx]
224
- for label in range(len(result)):
225
- bboxes = result[label]
226
- for i in range(bboxes.shape[0]):
227
- data = dict()
228
- data['image_id'] = img_id
229
- data['bbox'] = self.xyxy2xywh(bboxes[i])
230
- data['score'] = float(bboxes[i][4])
231
- data['category_id'] = self.cat_ids[label]
232
- json_results.append(data)
233
- return json_results
234
-
235
- def _segm2json(self, results):
236
- """Convert instance segmentation results to COCO json style."""
237
- bbox_json_results = []
238
- segm_json_results = []
239
- for idx in range(len(self)):
240
- img_id = self.img_ids[idx]
241
- det, seg = results[idx]
242
- for label in range(len(det)):
243
- # bbox results
244
- bboxes = det[label]
245
- for i in range(bboxes.shape[0]):
246
- data = dict()
247
- data['image_id'] = img_id
248
- data['bbox'] = self.xyxy2xywh(bboxes[i])
249
- data['score'] = float(bboxes[i][4])
250
- data['category_id'] = self.cat_ids[label]
251
- bbox_json_results.append(data)
252
-
253
- # segm results
254
- # some detectors use different scores for bbox and mask
255
- if isinstance(seg, tuple):
256
- segms = seg[0][label]
257
- mask_score = seg[1][label]
258
- else:
259
- segms = seg[label]
260
- mask_score = [bbox[4] for bbox in bboxes]
261
- for i in range(bboxes.shape[0]):
262
- data = dict()
263
- data['image_id'] = img_id
264
- data['bbox'] = self.xyxy2xywh(bboxes[i])
265
- data['score'] = float(mask_score[i])
266
- data['category_id'] = self.cat_ids[label]
267
- if isinstance(segms[i]['counts'], bytes):
268
- segms[i]['counts'] = segms[i]['counts'].decode()
269
- data['segmentation'] = segms[i]
270
- segm_json_results.append(data)
271
- return bbox_json_results, segm_json_results
272
-
273
- def results2json(self, results, outfile_prefix):
274
- """Dump the detection results to a COCO style json file.
275
-
276
- There are 3 types of results: proposals, bbox predictions, mask
277
- predictions, and they have different data types. This method will
278
- automatically recognize the type, and dump them to json files.
279
-
280
- Args:
281
- results (list[list | tuple | ndarray]): Testing results of the
282
- dataset.
283
- outfile_prefix (str): The filename prefix of the json files. If the
284
- prefix is "somepath/xxx", the json files will be named
285
- "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
286
- "somepath/xxx.proposal.json".
287
-
288
- Returns:
289
- dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
290
- values are corresponding filenames.
291
- """
292
- result_files = dict()
293
- if isinstance(results[0], list):
294
- json_results = self._det2json(results)
295
- result_files['bbox'] = f'{outfile_prefix}.bbox.json'
296
- result_files['proposal'] = f'{outfile_prefix}.bbox.json'
297
- mmcv.dump(json_results, result_files['bbox'])
298
- elif isinstance(results[0], tuple):
299
- json_results = self._segm2json(results)
300
- result_files['bbox'] = f'{outfile_prefix}.bbox.json'
301
- result_files['proposal'] = f'{outfile_prefix}.bbox.json'
302
- result_files['segm'] = f'{outfile_prefix}.segm.json'
303
- mmcv.dump(json_results[0], result_files['bbox'])
304
- mmcv.dump(json_results[1], result_files['segm'])
305
- elif isinstance(results[0], np.ndarray):
306
- json_results = self._proposal2json(results)
307
- result_files['proposal'] = f'{outfile_prefix}.proposal.json'
308
- mmcv.dump(json_results, result_files['proposal'])
309
- else:
310
- raise TypeError('invalid type of results')
311
- return result_files
312
-
313
- def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
314
- gt_bboxes = []
315
- for i in range(len(self.img_ids)):
316
- ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
317
- ann_info = self.coco.load_anns(ann_ids)
318
- if len(ann_info) == 0:
319
- gt_bboxes.append(np.zeros((0, 4)))
320
- continue
321
- bboxes = []
322
- for ann in ann_info:
323
- if ann.get('ignore', False) or ann['iscrowd']:
324
- continue
325
- x1, y1, w, h = ann['bbox']
326
- bboxes.append([x1, y1, x1 + w, y1 + h])
327
- bboxes = np.array(bboxes, dtype=np.float32)
328
- if bboxes.shape[0] == 0:
329
- bboxes = np.zeros((0, 4))
330
- gt_bboxes.append(bboxes)
331
-
332
- recalls = eval_recalls(
333
- gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
334
- ar = recalls.mean(axis=1)
335
- return ar
336
-
337
- def format_results(self, results, jsonfile_prefix=None, **kwargs):
338
- """Format the results to json (standard format for COCO evaluation).
339
-
340
- Args:
341
- results (list[tuple | numpy.ndarray]): Testing results of the
342
- dataset.
343
- jsonfile_prefix (str | None): The prefix of json files. It includes
344
- the file path and the prefix of filename, e.g., "a/b/prefix".
345
- If not specified, a temp file will be created. Default: None.
346
-
347
- Returns:
348
- tuple: (result_files, tmp_dir), result_files is a dict containing \
349
- the json filepaths, tmp_dir is the temporal directory created \
350
- for saving json files when jsonfile_prefix is not specified.
351
- """
352
- assert isinstance(results, list), 'results must be a list'
353
- assert len(results) == len(self), (
354
- 'The length of results is not equal to the dataset len: {} != {}'.
355
- format(len(results), len(self)))
356
-
357
- if jsonfile_prefix is None:
358
- tmp_dir = tempfile.TemporaryDirectory()
359
- jsonfile_prefix = osp.join(tmp_dir.name, 'results')
360
- else:
361
- tmp_dir = None
362
- result_files = self.results2json(results, jsonfile_prefix)
363
- return result_files, tmp_dir
364
-
365
- def evaluate(self,
366
- results,
367
- metric='bbox',
368
- logger=None,
369
- jsonfile_prefix=None,
370
- classwise=False,
371
- proposal_nums=(100, 300, 1000),
372
- iou_thrs=None,
373
- metric_items=None):
374
- """Evaluation in COCO protocol.
375
-
376
- Args:
377
- results (list[list | tuple]): Testing results of the dataset.
378
- metric (str | list[str]): Metrics to be evaluated. Options are
379
- 'bbox', 'segm', 'proposal', 'proposal_fast'.
380
- logger (logging.Logger | str | None): Logger used for printing
381
- related information during evaluation. Default: None.
382
- jsonfile_prefix (str | None): The prefix of json files. It includes
383
- the file path and the prefix of filename, e.g., "a/b/prefix".
384
- If not specified, a temp file will be created. Default: None.
385
- classwise (bool): Whether to evaluating the AP for each class.
386
- proposal_nums (Sequence[int]): Proposal number used for evaluating
387
- recalls, such as recall@100, recall@1000.
388
- Default: (100, 300, 1000).
389
- iou_thrs (Sequence[float], optional): IoU threshold used for
390
- evaluating recalls/mAPs. If set to a list, the average of all
391
- IoUs will also be computed. If not specified, [0.50, 0.55,
392
- 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
393
- Default: None.
394
- metric_items (list[str] | str, optional): Metric items that will
395
- be returned. If not specified, ``['AR@100', 'AR@300',
396
- 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
397
- used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
398
- 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
399
- ``metric=='bbox' or metric=='segm'``.
400
-
401
- Returns:
402
- dict[str, float]: COCO style evaluation metric.
403
- """
404
-
405
- metrics = metric if isinstance(metric, list) else [metric]
406
- allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
407
- for metric in metrics:
408
- if metric not in allowed_metrics:
409
- raise KeyError(f'metric {metric} is not supported')
410
- if iou_thrs is None:
411
- iou_thrs = np.linspace(
412
- .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
413
- if metric_items is not None:
414
- if not isinstance(metric_items, list):
415
- metric_items = [metric_items]
416
-
417
- #result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
418
-
419
- eval_results = OrderedDict()
420
- cocoGt = self.coco
421
- print(cocoGt['images'])
422
- asas
423
- for metric in metrics:
424
- msg = f'Evaluating {metric}...'
425
- if logger is None:
426
- msg = '\n' + msg
427
- print_log(msg, logger=logger)
428
-
429
- if metric == 'proposal_fast':
430
- ar = self.fast_eval_recall(
431
- results, proposal_nums, iou_thrs, logger='silent')
432
- log_msg = []
433
- for i, num in enumerate(proposal_nums):
434
- eval_results[f'AR@{num}'] = ar[i]
435
- log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
436
- log_msg = ''.join(log_msg)
437
- print_log(log_msg, logger=logger)
438
- continue
439
-
440
- if metric not in result_files:
441
- raise KeyError(f'{metric} is not in results')
442
- try:
443
- cocoDt = cocoGt.loadRes(result_files[metric])
444
- except IndexError:
445
- print_log(
446
- 'The testing results of the whole dataset is empty.',
447
- logger=logger,
448
- level=logging.ERROR)
449
- break
450
-
451
- iou_type = 'bbox' if metric == 'proposal' else metric
452
- cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
453
- cocoEval.params.catIds = self.cat_ids
454
- cocoEval.params.imgIds = self.img_ids
455
- cocoEval.params.maxDets = list(proposal_nums)
456
- cocoEval.params.iouThrs = iou_thrs
457
- # mapping of cocoEval.stats
458
- coco_metric_names = {
459
- 'mAP': 0,
460
- 'mAP_50': 1,
461
- 'mAP_75': 2,
462
- 'mAP_s': 3,
463
- 'mAP_m': 4,
464
- 'mAP_l': 5,
465
- 'AR@100': 6,
466
- 'AR@300': 7,
467
- 'AR@1000': 8,
468
- 'AR_s@1000': 9,
469
- 'AR_m@1000': 10,
470
- 'AR_l@1000': 11
471
- }
472
- if metric_items is not None:
473
- for metric_item in metric_items:
474
- if metric_item not in coco_metric_names:
475
- raise KeyError(
476
- f'metric item {metric_item} is not supported')
477
-
478
- if metric == 'proposal':
479
- cocoEval.params.useCats = 0
480
- cocoEval.evaluate()
481
- cocoEval.accumulate()
482
- cocoEval.summarize()
483
- if metric_items is None:
484
- metric_items = [
485
- 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
486
- 'AR_m@1000', 'AR_l@1000'
487
- ]
488
-
489
- for item in metric_items:
490
- val = float(
491
- f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
492
- eval_results[item] = val
493
- else:
494
- cocoEval.evaluate()
495
- cocoEval.accumulate()
496
- cocoEval.summarize()
497
- if classwise: # Compute per-category AP
498
- # Compute per-category AP
499
- # from https://github.com/facebookresearch/detectron2/
500
- precisions = cocoEval.eval['precision']
501
- # precision: (iou, recall, cls, area range, max dets)
502
- assert len(self.cat_ids) == precisions.shape[2]
503
-
504
- results_per_category = []
505
- for idx, catId in enumerate(self.cat_ids):
506
- # area range index 0: all area ranges
507
- # max dets index -1: typically 100 per image
508
- nm = self.coco.loadCats(catId)[0]
509
- precision = precisions[:, :, idx, 0, -1]
510
- precision = precision[precision > -1]
511
- if precision.size:
512
- ap = np.mean(precision)
513
- else:
514
- ap = float('nan')
515
- results_per_category.append(
516
- (f'{nm["name"]}', f'{float(ap):0.3f}'))
517
-
518
- num_columns = min(6, len(results_per_category) * 2)
519
- results_flatten = list(
520
- itertools.chain(*results_per_category))
521
- headers = ['category', 'AP'] * (num_columns // 2)
522
- results_2d = itertools.zip_longest(*[
523
- results_flatten[i::num_columns]
524
- for i in range(num_columns)
525
- ])
526
- table_data = [headers]
527
- table_data += [result for result in results_2d]
528
- table = AsciiTable(table_data)
529
- print_log('\n' + table.table, logger=logger)
530
-
531
- if metric_items is None:
532
- metric_items = [
533
- 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
534
- ]
535
-
536
- for metric_item in metric_items:
537
- key = f'{metric}_{metric_item}'
538
- val = float(
539
- f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
540
- )
541
- eval_results[key] = val
542
- ap = cocoEval.stats[:6]
543
- eval_results[f'{metric}_mAP_copypaste'] = (
544
- f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
545
- f'{ap[4]:.3f} {ap[5]:.3f}')
546
- if tmp_dir is not None:
547
- tmp_dir.cleanup()
548
- return eval_results
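
For orientation, a minimal sketch of driving this dataset's `evaluate` API, assuming detection results cached from a prior mmdet test run; the paths, the `results.pkl` file, and the `test_mode` flag are placeholders, not values taken from this repo.

```python
# Minimal evaluation sketch; paths and the cached results file are placeholders.
import mmcv
from mmdet.datasets import build_dataset

dataset = build_dataset(dict(
    type='CocoDataset',
    ann_file='data/coco/annotations/instances_val2017.json',
    img_prefix='data/coco/val2017/',
    pipeline=[],          # no transforms are needed just to evaluate
    test_mode=True,       # keep all images so lengths match the test run
))

results = mmcv.load('results.pkl')  # one entry per image from a prior test run

metrics = dataset.evaluate(results, metric=['bbox', 'segm'], classwise=True)
print(metrics.get('bbox_mAP'), metrics.get('segm_mAP'))
```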
 
spaces/CVPR/regionclip-demo/detectron2/engine/launch.py DELETED
@@ -1,125 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import logging
3
- from datetime import timedelta
4
- import torch
5
- import torch.distributed as dist
6
- import torch.multiprocessing as mp
7
-
8
- from detectron2.utils import comm
9
-
10
- __all__ = ["DEFAULT_TIMEOUT", "launch"]
11
-
12
- DEFAULT_TIMEOUT = timedelta(minutes=30)
13
-
14
-
15
- def _find_free_port():
16
- import socket
17
-
18
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
19
- # Binding to port 0 will cause the OS to find an available port for us
20
- sock.bind(("", 0))
21
- port = sock.getsockname()[1]
22
- sock.close()
23
- # NOTE: there is still a chance the port could be taken by other processes.
24
- return port
25
-
26
-
27
- def launch(
28
- main_func,
29
- num_gpus_per_machine,
30
- num_machines=1,
31
- machine_rank=0,
32
- dist_url=None,
33
- args=(),
34
- timeout=DEFAULT_TIMEOUT,
35
- ):
36
- """
37
- Launch multi-gpu or distributed training.
38
- This function must be called on all machines involved in the training.
39
- It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine.
40
-
41
- Args:
42
- main_func: a function that will be called by `main_func(*args)`
43
- num_gpus_per_machine (int): number of GPUs per machine
44
- num_machines (int): the total number of machines
45
- machine_rank (int): the rank of this machine
46
- dist_url (str): url to connect to for distributed jobs, including protocol
47
- e.g. "tcp://127.0.0.1:8686".
48
- Can be set to "auto" to automatically select a free port on localhost
49
- timeout (timedelta): timeout of the distributed workers
50
- args (tuple): arguments passed to main_func
51
- """
52
- world_size = num_machines * num_gpus_per_machine
53
- if world_size > 1:
54
- # https://github.com/pytorch/pytorch/pull/14391
55
- # TODO prctl in spawned processes
56
-
57
- if dist_url == "auto":
58
- assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs."
59
- port = _find_free_port()
60
- dist_url = f"tcp://127.0.0.1:{port}"
61
- if num_machines > 1 and dist_url.startswith("file://"):
62
- logger = logging.getLogger(__name__)
63
- logger.warning(
64
- "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://"
65
- )
66
-
67
- mp.spawn(
68
- _distributed_worker,
69
- nprocs=num_gpus_per_machine,
70
- args=(
71
- main_func,
72
- world_size,
73
- num_gpus_per_machine,
74
- machine_rank,
75
- dist_url,
76
- args,
77
- timeout,
78
- ),
79
- daemon=False,
80
- )
81
- else:
82
- main_func(*args)
83
-
84
-
85
- def _distributed_worker(
86
- local_rank,
87
- main_func,
88
- world_size,
89
- num_gpus_per_machine,
90
- machine_rank,
91
- dist_url,
92
- args,
93
- timeout=DEFAULT_TIMEOUT,
94
- ):
95
- assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
96
- global_rank = machine_rank * num_gpus_per_machine + local_rank
97
- try:
98
- dist.init_process_group(
99
- backend="NCCL",
100
- init_method=dist_url,
101
- world_size=world_size,
102
- rank=global_rank,
103
- timeout=timeout,
104
- )
105
- except Exception as e:
106
- logger = logging.getLogger(__name__)
107
- logger.error("Process group URL: {}".format(dist_url))
108
- raise e
109
- # synchronize is needed here to prevent a possible timeout after calling init_process_group
110
- # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
111
- comm.synchronize()
112
-
113
- assert num_gpus_per_machine <= torch.cuda.device_count()
114
- torch.cuda.set_device(local_rank)
115
-
116
- # Setup the local process group (which contains ranks within the same machine)
117
- assert comm._LOCAL_PROCESS_GROUP is None
118
- num_machines = world_size // num_gpus_per_machine
119
- for i in range(num_machines):
120
- ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
121
- pg = dist.new_group(ranks_on_i)
122
- if i == machine_rank:
123
- comm._LOCAL_PROCESS_GROUP = pg
124
-
125
- main_func(*args)
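
As a usage note, a driver script typically hands its entry point to `launch` as sketched below; the body of `main` and the argument values are placeholders under the assumption of a single machine with two GPUs.

```python
# Hypothetical driver sketch for the launch() helper shown above.
from detectron2.engine import launch

def main(cfg):
    # normally: build the model/trainer from cfg and start training
    print(f"worker started with cfg: {cfg}")

if __name__ == "__main__":
    launch(
        main,
        num_gpus_per_machine=2,   # one worker process per GPU
        num_machines=1,
        machine_rank=0,
        dist_url="auto",          # let launch() pick a free local port
        args=({"lr": 0.01},),     # forwarded to main()
    )
```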
 
spaces/CikeyQI/Yunzai/Yunzai/lib/tools/command.js DELETED
@@ -1,118 +0,0 @@
1
-
2
- import '../config/init.js'
3
- import log4js from 'log4js'
4
- import PluginsLoader from '../plugins/loader.js'
5
- import cfg from '../config/config.js'
6
-
7
- class Command {
8
- constructor () {
9
- this.command = ''
10
- // this.setLog()
11
- /** Global Bot */
12
- global.Bot = {}
13
- }
14
-
15
- /**
16
- * @param type Command config type, defaults to 'default'
17
- */
18
- async run (type = 'default') {
19
- /** Load icqq event listeners */
20
- await PluginsLoader.load()
21
- /** Get command-line arguments */
22
- this.getCommand()
23
- /** Fake a message event */
24
- let e = this.fakeE(type)
25
-
26
- /** Let plugins handle the message */
27
- await PluginsLoader.deal(e)
28
- }
29
-
30
- /** Set the command */
31
- getCommand () {
32
- if (process.argv[2]) {
33
- this.command = '#' + process.argv[2].replace(/#|#|井/g, '#').trim()
34
- }
35
- }
36
-
37
- fakeE (id = 'default') {
38
- /** Get the test config */
39
- let data = cfg.getYaml('test', id)
40
- let text = this.command || data.text || ''
41
- logger.info(`测试命令 [${text}]`)
42
- let e = {
43
- test: true,
44
- self_id: 10000,
45
- time: new Date().getTime(),
46
- post_type: data.post_type || 'message',
47
- message_type: data.message_type || 'group',
48
- sub_type: data.sub_type || 'normal',
49
- group_id: data.group_id || 826198224,
50
- group_name: data.group_name || '测试群',
51
- user_id: data.user_id,
52
- anonymous: null,
53
- message: [{ type: 'text', text }],
54
- raw_message: text,
55
- font: '微软雅黑',
56
- sender: {
57
- user_id: data.user_id,
58
- nickname: '测试',
59
- card: data.card,
60
- sex: 'male',
61
- age: 0,
62
- area: 'unknown',
63
- level: 2,
64
- role: 'owner',
65
- title: ''
66
- },
67
- group: {
68
- mute_left: 0,
69
- sendMsg: (msg) => {
70
- logger.info(`回复内容 ${msg}`)
71
- }
72
- },
73
- friend: {
74
- getFileUrl: (fid) => {
75
- return data.message[0].url
76
- }
77
- },
78
- message_id: 'JzHU0DACliIAAAD3RzTh1WBOIC48',
79
- reply: async (msg) => {
80
- logger.info(`回复内容 ${msg}`)
81
- },
82
- toString: () => {
83
- return text
84
- }
85
- }
86
-
87
- if (data.message) {
88
- e.message = data.message
89
- }
90
-
91
- return e
92
- }
93
-
94
- /** Logging */
95
- setLog () {
96
- log4js.configure({
97
- appenders: {
98
- // Configure console output (the default log level is off, i.e. nothing is logged)
99
- out: {
100
- type: 'console',
101
- layout: {
102
- type: 'pattern',
103
- pattern: '[%d{hh:mm:ss.SSS}][%[%5.5p%]] - %m'
104
- }
105
- }
106
- },
107
- // Logs of different levels go to different outputs: appenders: ['out', 'allLog']; the keys under categories correspond to the argument passed to getLogger()
108
- categories: {
109
- // appenders: which appenders to use (taken from the appenders section above); level: the minimum log level
110
- default: { appenders: ['out'], level: 'debug' }
111
- }
112
- })
113
- global.logger = log4js.getLogger('[test]')
114
- logger.level = 'debug'
115
- }
116
- }
117
-
118
- export default new Command()
 
spaces/CikeyQI/meme-api/meme_generator/memes/douyin/__init__.py DELETED
@@ -1,78 +0,0 @@
1
- import math
2
- import random
3
- from typing import List
4
-
5
- from PIL.Image import Image as IMG
6
- from pil_utils import BuildImage, Text2Image
7
-
8
- from meme_generator import add_meme
9
- from meme_generator.utils import save_gif
10
-
11
-
12
- def douyin(images, texts: List[str], args):
13
- text = texts[0]
14
- text = " ".join(text.splitlines())
15
- fontsize = 200
16
- offset = round(fontsize * 0.05)
17
- px = 70
18
- py = 30
19
- bg_color = "#1C0B1B"
20
- frame = Text2Image.from_text(
21
- text, fontsize, fill="#FF0050", stroke_fill="#FF0050", stroke_width=5
22
- ).to_image(bg_color=bg_color, padding=(px + offset * 2, py + offset * 2, px, py))
23
- Text2Image.from_text(
24
- text, fontsize, fill="#00F5EB", stroke_fill="#00F5EB", stroke_width=5
25
- ).draw_on_image(frame, (px, py))
26
- Text2Image.from_text(
27
- text, fontsize, fill="white", stroke_fill="white", stroke_width=5
28
- ).draw_on_image(frame, (px + offset, py + offset))
29
- frame = BuildImage(frame)
30
-
31
- width = frame.width - px
32
- height = frame.height - py
33
- frame_num = 10
34
- devide_num = 6
35
- seed = 20 * 0.05
36
- frames: List[IMG] = []
37
- for _ in range(frame_num):
38
- new_frame = frame.copy()
39
- h_seeds = [
40
- math.fabs(math.sin(random.random() * devide_num)) for _ in range(devide_num)
41
- ]
42
- h_seed_sum = sum(h_seeds)
43
- h_seeds = [s / h_seed_sum for s in h_seeds]
44
- direction = 1
45
- last_yn = 0
46
- last_h = 0
47
- for i in range(devide_num):
48
- yn = last_yn + last_h
49
- h = max(round(height * h_seeds[i]), 2)
50
- last_yn = yn
51
- last_h = h
52
- direction = -direction
53
- piece = new_frame.copy().crop((px, yn, px + width, yn + h))
54
- new_frame.paste(piece, (px + round(i * direction * seed), yn))
55
- # perspective transform
56
- move_x = 64
57
- points = (
58
- (move_x, 0),
59
- (new_frame.width + move_x, 0),
60
- (new_frame.width, new_frame.height),
61
- (0, new_frame.height),
62
- )
63
- new_frame = new_frame.perspective(points)
64
- bg = BuildImage.new("RGBA", new_frame.size, bg_color)
65
- bg.paste(new_frame, alpha=True)
66
- frames.append(bg.image)
67
-
68
- return save_gif(frames, 0.2)
69
-
70
-
71
- add_meme(
72
- "douyin",
73
- douyin,
74
- min_texts=1,
75
- max_texts=1,
76
- default_texts=["douyin"],
77
- keywords=["douyin"],
78
- )
 
spaces/Cletrason/Cletrason-toad-mario-movie/gradio_utils.py DELETED
@@ -1,98 +0,0 @@
1
- import os
2
-
3
- # App Canny utils
4
-
5
-
6
- def edge_path_to_video_path(edge_path):
7
- video_path = edge_path
8
-
9
- vid_name = edge_path.split("/")[-1]
10
- if vid_name == "butterfly.mp4":
11
- video_path = "__assets__/canny_videos_mp4_2fps/butterfly.mp4"
12
- elif vid_name == "deer.mp4":
13
- video_path = "__assets__/canny_videos_mp4_2fps/deer.mp4"
14
- elif vid_name == "fox.mp4":
15
- video_path = "__assets__/canny_videos_mp4_2fps/fox.mp4"
16
- elif vid_name == "girl_dancing.mp4":
17
- video_path = "__assets__/canny_videos_mp4_2fps/girl_dancing.mp4"
18
- elif vid_name == "girl_turning.mp4":
19
- video_path = "__assets__/canny_videos_mp4_2fps/girl_turning.mp4"
20
- elif vid_name == "halloween.mp4":
21
- video_path = "__assets__/canny_videos_mp4_2fps/halloween.mp4"
22
- elif vid_name == "santa.mp4":
23
- video_path = "__assets__/canny_videos_mp4_2fps/santa.mp4"
24
-
25
- assert os.path.isfile(video_path)
26
- return video_path
27
-
28
-
29
- # App Pose utils
30
- def motion_to_video_path(motion):
31
- videos = [
32
- "__assets__/poses_skeleton_gifs/dance1_corr.mp4",
33
- "__assets__/poses_skeleton_gifs/dance2_corr.mp4",
34
- "__assets__/poses_skeleton_gifs/dance3_corr.mp4",
35
- "__assets__/poses_skeleton_gifs/dance4_corr.mp4",
36
- "__assets__/poses_skeleton_gifs/dance5_corr.mp4"
37
- ]
38
- if len(motion.split(" ")) > 1 and motion.split(" ")[1].isnumeric():
39
- id = int(motion.split(" ")[1]) - 1
40
- return videos[id]
41
- else:
42
- return motion
43
-
44
-
45
- # App Canny Dreambooth utils
46
- def get_video_from_canny_selection(canny_selection):
47
- if canny_selection == "woman1":
48
- input_video_path = "__assets__/db_files_2fps/woman1.mp4"
49
-
50
- elif canny_selection == "woman2":
51
- input_video_path = "__assets__/db_files_2fps/woman2.mp4"
52
-
53
- elif canny_selection == "man1":
54
- input_video_path = "__assets__/db_files_2fps/man1.mp4"
55
-
56
- elif canny_selection == "woman3":
57
- input_video_path = "__assets__/db_files_2fps/woman3.mp4"
58
- else:
59
- input_video_path = canny_selection
60
-
61
- assert os.path.isfile(input_video_path)
62
- return input_video_path
63
-
64
-
65
- def get_model_from_db_selection(db_selection):
66
- if db_selection == "Anime DB":
67
- input_video_path = 'PAIR/text2video-zero-controlnet-canny-anime'
68
- elif db_selection == "Avatar DB":
69
- input_video_path = 'PAIR/text2video-zero-controlnet-canny-avatar'
70
- elif db_selection == "GTA-5 DB":
71
- input_video_path = 'PAIR/text2video-zero-controlnet-canny-gta5'
72
- elif db_selection == "Arcane DB":
73
- input_video_path = 'PAIR/text2video-zero-controlnet-canny-arcane'
74
- else:
75
- input_video_path = db_selection
76
-
77
- return input_video_path
78
-
79
-
80
- def get_db_name_from_id(id):
81
- db_names = ["Anime DB", "Arcane DB", "GTA-5 DB", "Avatar DB"]
82
- return db_names[id]
83
-
84
-
85
- def get_canny_name_from_id(id):
86
- canny_names = ["woman1", "woman2", "man1", "woman3"]
87
- return canny_names[id]
88
-
89
-
90
- def logo_name_to_path(name):
91
- logo_paths = {
92
- 'Picsart AI Research': '__assets__/pair_watermark.png',
93
- 'Text2Video-Zero': '__assets__/t2v-z_watermark.png',
94
- 'None': None
95
- }
96
- if name in logo_paths:
97
- return logo_paths[name]
98
- return name
 
spaces/CofAI/chat/g4f/Provider/Providers/DeepAi.py DELETED
@@ -1,46 +0,0 @@
1
- import os
2
- import json
3
- import random
4
- import hashlib
5
- import requests
6
-
7
- from ...typing import sha256, Dict, get_type_hints
8
-
9
- url = 'https://deepai.org'
10
- model = ['gpt-3.5-turbo']
11
- supports_stream = True
12
- needs_auth = False
13
-
14
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
15
- def md5(text: str) -> str:
16
- return hashlib.md5(text.encode()).hexdigest()[::-1]
17
-
18
-
19
- def get_api_key(user_agent: str) -> str:
20
- part1 = str(random.randint(0, 10**11))
21
- part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
22
-
23
- return f"tryit-{part1}-{part2}"
24
-
25
- user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
26
-
27
- headers = {
28
- "api-key": get_api_key(user_agent),
29
- "user-agent": user_agent
30
- }
31
-
32
- files = {
33
- "chat_style": (None, "chat"),
34
- "chatHistory": (None, json.dumps(messages))
35
- }
36
-
37
- r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
38
-
39
- for chunk in r.iter_content(chunk_size=None):
40
- r.raise_for_status()
41
- yield chunk.decode()
42
-
43
-
44
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
45
- '(%s)' % ', '.join(
46
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
spaces/CuriousDolphin/MobileSAM/README.md DELETED
@@ -1,45 +0,0 @@
1
- ---
2
- title: MobileSAM
3
- emoji: 🐠
4
- colorFrom: indigo
5
- colorTo: yellow
6
- sdk: gradio
7
- python_version: 3.8.10
8
- sdk_version: 3.35.2
9
- app_file: app.py
10
- pinned: false
11
- license: apache-2.0
12
- duplicated_from: dhkim2810/MobileSAM
13
- ---
14
-
15
- # Faster Segment Anything(MobileSAM)
16
-
17
- Official PyTorch Implementation of the <a href="https://github.com/ChaoningZhang/MobileSAM">MobileSAM project</a>.
18
-
19
-
20
- **MobileSAM** performs on par with the original SAM (at least visually) and keeps exactly the same pipeline as the original SAM, except for a change to the image encoder.
21
- Specifically, we replace the original heavyweight ViT-H encoder (632M) with a much smaller Tiny-ViT (5M). On a single GPU, MobileSAM runs around 12ms per image: 8ms on the image encoder and 4ms on the mask decoder.
22
-
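
A minimal usage sketch, assuming the package exposes the same predictor API as the original segment-anything library (consistent with the unchanged pipeline described above); the `"vit_t"` registry key, checkpoint path, and image path are assumptions rather than values from this README.

```python
# Minimal sketch, assuming a segment-anything-compatible API; paths and the
# "vit_t" registry key are assumptions.
import cv2
import numpy as np
from mobile_sam import sam_model_registry, SamPredictor

sam = sam_model_registry["vit_t"](checkpoint="weights/mobile_sam.pt")
sam.eval()

predictor = SamPredictor(sam)
image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
predictor.set_image(image)

# prompt with a single foreground point in the middle of the image
h, w = image.shape[:2]
masks, scores, _ = predictor.predict(
    point_coords=np.array([[w // 2, h // 2]]),
    point_labels=np.array([1]),
)
print(masks.shape, scores)
```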
23
-
24
- ## License
25
-
26
- The model is licensed under the [Apache 2.0 license](LICENSE).
27
-
28
-
29
- ## Acknowledgement
30
-
31
- - [Segment Anything](https://segment-anything.com/) provides the SA-1B dataset and the base codes.
32
- - [TinyViT](https://github.com/microsoft/Cream/tree/main/TinyViT) provides codes and pre-trained models.
33
-
34
- ## Citing MobileSAM
35
-
36
- If you find this project useful for your research, please consider citing the following BibTeX entry.
37
-
38
- ```bibtex
39
- @article{mobile_sam,
40
- title={Faster Segment Anything: Towards Lightweight SAM for Mobile Applications},
41
- author={Zhang, Chaoning and Han, Dongshen and Qiao, Yu and Kim, Jung Uk and Bae, Sung Ho and Lee, Seungkyu and Hong, Choong Seon},
42
- journal={arXiv preprint arXiv:2306.14289},
43
- year={2023}
44
- }
45
- ```
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/mtiLib/__init__.py DELETED
@@ -1,1402 +0,0 @@
1
- #!/usr/bin/python
2
-
3
- # FontDame-to-FontTools for OpenType Layout tables
4
- #
5
- # Source language spec is available at:
6
- # http://monotype.github.io/OpenType_Table_Source/otl_source.html
7
- # https://github.com/Monotype/OpenType_Table_Source/
8
-
9
- from fontTools import ttLib
10
- from fontTools.ttLib.tables._c_m_a_p import cmap_classes
11
- from fontTools.ttLib.tables import otTables as ot
12
- from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict
13
- from fontTools.otlLib import builder as otl
14
- from contextlib import contextmanager
15
- from fontTools.ttLib import newTable
16
- from fontTools.feaLib.lookupDebugInfo import LOOKUP_DEBUG_ENV_VAR, LOOKUP_DEBUG_INFO_KEY
17
- from operator import setitem
18
- import os
19
- import logging
20
-
21
-
22
- class MtiLibError(Exception):
23
- pass
24
-
25
-
26
- class ReferenceNotFoundError(MtiLibError):
27
- pass
28
-
29
-
30
- class FeatureNotFoundError(ReferenceNotFoundError):
31
- pass
32
-
33
-
34
- class LookupNotFoundError(ReferenceNotFoundError):
35
- pass
36
-
37
-
38
- log = logging.getLogger("fontTools.mtiLib")
39
-
40
-
41
- def makeGlyph(s):
42
- if s[:2] in ["U ", "u "]:
43
- return ttLib.TTFont._makeGlyphName(int(s[2:], 16))
44
- elif s[:2] == "# ":
45
- return "glyph%.5d" % int(s[2:])
46
- assert s.find(" ") < 0, "Space found in glyph name: %s" % s
47
- assert s, "Glyph name is empty"
48
- return s
49
-
50
-
51
- def makeGlyphs(l):
52
- return [makeGlyph(g) for g in l]
53
-
54
-
55
- def mapLookup(sym, mapping):
56
- # Lookups are addressed by name. So resolved them using a map if available.
57
- # Fallback to parsing as lookup index if a map isn't provided.
58
- if mapping is not None:
59
- try:
60
- idx = mapping[sym]
61
- except KeyError:
62
- raise LookupNotFoundError(sym)
63
- else:
64
- idx = int(sym)
65
- return idx
66
-
67
-
68
- def mapFeature(sym, mapping):
69
- # Features are referenced by index according the spec. So, if symbol is an
70
- # integer, use it directly. Otherwise look up in the map if provided.
71
- try:
72
- idx = int(sym)
73
- except ValueError:
74
- try:
75
- idx = mapping[sym]
76
- except KeyError:
77
- raise FeatureNotFoundError(sym)
78
- return idx
79
-
80
-
81
- def setReference(mapper, mapping, sym, setter, collection, key):
82
- try:
83
- mapped = mapper(sym, mapping)
84
- except ReferenceNotFoundError as e:
85
- try:
86
- if mapping is not None:
87
- mapping.addDeferredMapping(
88
- lambda ref: setter(collection, key, ref), sym, e
89
- )
90
- return
91
- except AttributeError:
92
- pass
93
- raise
94
- setter(collection, key, mapped)
95
-
96
-
97
- class DeferredMapping(dict):
98
- def __init__(self):
99
- self._deferredMappings = []
100
-
101
- def addDeferredMapping(self, setter, sym, e):
102
- log.debug("Adding deferred mapping for symbol '%s' %s", sym, type(e).__name__)
103
- self._deferredMappings.append((setter, sym, e))
104
-
105
- def applyDeferredMappings(self):
106
- for setter, sym, e in self._deferredMappings:
107
- log.debug(
108
- "Applying deferred mapping for symbol '%s' %s", sym, type(e).__name__
109
- )
110
- try:
111
- mapped = self[sym]
112
- except KeyError:
113
- raise e
114
- setter(mapped)
115
- log.debug("Set to %s", mapped)
116
- self._deferredMappings = []
117
-
118
-
119
- def parseScriptList(lines, featureMap=None):
120
- self = ot.ScriptList()
121
- records = []
122
- with lines.between("script table"):
123
- for line in lines:
124
- while len(line) < 4:
125
- line.append("")
126
- scriptTag, langSysTag, defaultFeature, features = line
127
- log.debug("Adding script %s language-system %s", scriptTag, langSysTag)
128
-
129
- langSys = ot.LangSys()
130
- langSys.LookupOrder = None
131
- if defaultFeature:
132
- setReference(
133
- mapFeature,
134
- featureMap,
135
- defaultFeature,
136
- setattr,
137
- langSys,
138
- "ReqFeatureIndex",
139
- )
140
- else:
141
- langSys.ReqFeatureIndex = 0xFFFF
142
- syms = stripSplitComma(features)
143
- langSys.FeatureIndex = theList = [3] * len(syms)
144
- for i, sym in enumerate(syms):
145
- setReference(mapFeature, featureMap, sym, setitem, theList, i)
146
- langSys.FeatureCount = len(langSys.FeatureIndex)
147
-
148
- script = [s for s in records if s.ScriptTag == scriptTag]
149
- if script:
150
- script = script[0].Script
151
- else:
152
- scriptRec = ot.ScriptRecord()
153
- scriptRec.ScriptTag = scriptTag + " " * (4 - len(scriptTag))
154
- scriptRec.Script = ot.Script()
155
- records.append(scriptRec)
156
- script = scriptRec.Script
157
- script.DefaultLangSys = None
158
- script.LangSysRecord = []
159
- script.LangSysCount = 0
160
-
161
- if langSysTag == "default":
162
- script.DefaultLangSys = langSys
163
- else:
164
- langSysRec = ot.LangSysRecord()
165
- langSysRec.LangSysTag = langSysTag + " " * (4 - len(langSysTag))
166
- langSysRec.LangSys = langSys
167
- script.LangSysRecord.append(langSysRec)
168
- script.LangSysCount = len(script.LangSysRecord)
169
-
170
- for script in records:
171
- script.Script.LangSysRecord = sorted(
172
- script.Script.LangSysRecord, key=lambda rec: rec.LangSysTag
173
- )
174
- self.ScriptRecord = sorted(records, key=lambda rec: rec.ScriptTag)
175
- self.ScriptCount = len(self.ScriptRecord)
176
- return self
177
-
178
-
179
- def parseFeatureList(lines, lookupMap=None, featureMap=None):
180
- self = ot.FeatureList()
181
- self.FeatureRecord = []
182
- with lines.between("feature table"):
183
- for line in lines:
184
- name, featureTag, lookups = line
185
- if featureMap is not None:
186
- assert name not in featureMap, "Duplicate feature name: %s" % name
187
- featureMap[name] = len(self.FeatureRecord)
188
- # If feature name is integer, make sure it matches its index.
189
- try:
190
- assert int(name) == len(self.FeatureRecord), "%d %d" % (
191
- name,
192
- len(self.FeatureRecord),
193
- )
194
- except ValueError:
195
- pass
196
- featureRec = ot.FeatureRecord()
197
- featureRec.FeatureTag = featureTag
198
- featureRec.Feature = ot.Feature()
199
- self.FeatureRecord.append(featureRec)
200
- feature = featureRec.Feature
201
- feature.FeatureParams = None
202
- syms = stripSplitComma(lookups)
203
- feature.LookupListIndex = theList = [None] * len(syms)
204
- for i, sym in enumerate(syms):
205
- setReference(mapLookup, lookupMap, sym, setitem, theList, i)
206
- feature.LookupCount = len(feature.LookupListIndex)
207
-
208
- self.FeatureCount = len(self.FeatureRecord)
209
- return self
210
-
211
-
212
- def parseLookupFlags(lines):
213
- flags = 0
214
- filterset = None
215
- allFlags = [
216
- "righttoleft",
217
- "ignorebaseglyphs",
218
- "ignoreligatures",
219
- "ignoremarks",
220
- "markattachmenttype",
221
- "markfiltertype",
222
- ]
223
- while lines.peeks()[0].lower() in allFlags:
224
- line = next(lines)
225
- flag = {
226
- "righttoleft": 0x0001,
227
- "ignorebaseglyphs": 0x0002,
228
- "ignoreligatures": 0x0004,
229
- "ignoremarks": 0x0008,
230
- }.get(line[0].lower())
231
- if flag:
232
- assert line[1].lower() in ["yes", "no"], line[1]
233
- if line[1].lower() == "yes":
234
- flags |= flag
235
- continue
236
- if line[0].lower() == "markattachmenttype":
237
- flags |= int(line[1]) << 8
238
- continue
239
- if line[0].lower() == "markfiltertype":
240
- flags |= 0x10
241
- filterset = int(line[1])
242
- return flags, filterset
243
-
244
-
245
- def parseSingleSubst(lines, font, _lookupMap=None):
246
- mapping = {}
247
- for line in lines:
248
- assert len(line) == 2, line
249
- line = makeGlyphs(line)
250
- mapping[line[0]] = line[1]
251
- return otl.buildSingleSubstSubtable(mapping)
252
-
253
-
254
- def parseMultiple(lines, font, _lookupMap=None):
255
- mapping = {}
256
- for line in lines:
257
- line = makeGlyphs(line)
258
- mapping[line[0]] = line[1:]
259
- return otl.buildMultipleSubstSubtable(mapping)
260
-
261
-
262
- def parseAlternate(lines, font, _lookupMap=None):
263
- mapping = {}
264
- for line in lines:
265
- line = makeGlyphs(line)
266
- mapping[line[0]] = line[1:]
267
- return otl.buildAlternateSubstSubtable(mapping)
268
-
269
-
270
- def parseLigature(lines, font, _lookupMap=None):
271
- mapping = {}
272
- for line in lines:
273
- assert len(line) >= 2, line
274
- line = makeGlyphs(line)
275
- mapping[tuple(line[1:])] = line[0]
276
- return otl.buildLigatureSubstSubtable(mapping)
277
-
278
-
279
- def parseSinglePos(lines, font, _lookupMap=None):
280
- values = {}
281
- for line in lines:
282
- assert len(line) == 3, line
283
- w = line[0].title().replace(" ", "")
284
- assert w in valueRecordFormatDict
285
- g = makeGlyph(line[1])
286
- v = int(line[2])
287
- if g not in values:
288
- values[g] = ValueRecord()
289
- assert not hasattr(values[g], w), (g, w)
290
- setattr(values[g], w, v)
291
- return otl.buildSinglePosSubtable(values, font.getReverseGlyphMap())
292
-
293
-
294
- def parsePair(lines, font, _lookupMap=None):
295
- self = ot.PairPos()
296
- self.ValueFormat1 = self.ValueFormat2 = 0
297
- typ = lines.peeks()[0].split()[0].lower()
298
- if typ in ("left", "right"):
299
- self.Format = 1
300
- values = {}
301
- for line in lines:
302
- assert len(line) == 4, line
303
- side = line[0].split()[0].lower()
304
- assert side in ("left", "right"), side
305
- what = line[0][len(side) :].title().replace(" ", "")
306
- mask = valueRecordFormatDict[what][0]
307
- glyph1, glyph2 = makeGlyphs(line[1:3])
308
- value = int(line[3])
309
- if not glyph1 in values:
310
- values[glyph1] = {}
311
- if not glyph2 in values[glyph1]:
312
- values[glyph1][glyph2] = (ValueRecord(), ValueRecord())
313
- rec2 = values[glyph1][glyph2]
314
- if side == "left":
315
- self.ValueFormat1 |= mask
316
- vr = rec2[0]
317
- else:
318
- self.ValueFormat2 |= mask
319
- vr = rec2[1]
320
- assert not hasattr(vr, what), (vr, what)
321
- setattr(vr, what, value)
322
- self.Coverage = makeCoverage(set(values.keys()), font)
323
- self.PairSet = []
324
- for glyph1 in self.Coverage.glyphs:
325
- values1 = values[glyph1]
326
- pairset = ot.PairSet()
327
- records = pairset.PairValueRecord = []
328
- for glyph2 in sorted(values1.keys(), key=font.getGlyphID):
329
- values2 = values1[glyph2]
330
- pair = ot.PairValueRecord()
331
- pair.SecondGlyph = glyph2
332
- pair.Value1 = values2[0]
333
- pair.Value2 = values2[1] if self.ValueFormat2 else None
334
- records.append(pair)
335
- pairset.PairValueCount = len(pairset.PairValueRecord)
336
- self.PairSet.append(pairset)
337
- self.PairSetCount = len(self.PairSet)
338
- elif typ.endswith("class"):
339
- self.Format = 2
340
- classDefs = [None, None]
341
- while lines.peeks()[0].endswith("class definition begin"):
342
- typ = lines.peek()[0][: -len("class definition begin")].lower()
343
- idx, klass = {
344
- "first": (0, ot.ClassDef1),
345
- "second": (1, ot.ClassDef2),
346
- }[typ]
347
- assert classDefs[idx] is None
348
- classDefs[idx] = parseClassDef(lines, font, klass=klass)
349
- self.ClassDef1, self.ClassDef2 = classDefs
350
- self.Class1Count, self.Class2Count = (
351
- 1 + max(c.classDefs.values()) for c in classDefs
352
- )
353
- self.Class1Record = [ot.Class1Record() for i in range(self.Class1Count)]
354
- for rec1 in self.Class1Record:
355
- rec1.Class2Record = [ot.Class2Record() for j in range(self.Class2Count)]
356
- for rec2 in rec1.Class2Record:
357
- rec2.Value1 = ValueRecord()
358
- rec2.Value2 = ValueRecord()
359
- for line in lines:
360
- assert len(line) == 4, line
361
- side = line[0].split()[0].lower()
362
- assert side in ("left", "right"), side
363
- what = line[0][len(side) :].title().replace(" ", "")
364
- mask = valueRecordFormatDict[what][0]
365
- class1, class2, value = (int(x) for x in line[1:4])
366
- rec2 = self.Class1Record[class1].Class2Record[class2]
367
- if side == "left":
368
- self.ValueFormat1 |= mask
369
- vr = rec2.Value1
370
- else:
371
- self.ValueFormat2 |= mask
372
- vr = rec2.Value2
373
- assert not hasattr(vr, what), (vr, what)
374
- setattr(vr, what, value)
375
- for rec1 in self.Class1Record:
376
- for rec2 in rec1.Class2Record:
377
- rec2.Value1 = ValueRecord(self.ValueFormat1, rec2.Value1)
378
- rec2.Value2 = (
379
- ValueRecord(self.ValueFormat2, rec2.Value2)
380
- if self.ValueFormat2
381
- else None
382
- )
383
-
384
- self.Coverage = makeCoverage(set(self.ClassDef1.classDefs.keys()), font)
385
- else:
386
- assert 0, typ
387
- return self
388
-
389
-
390
- def parseKernset(lines, font, _lookupMap=None):
391
- typ = lines.peeks()[0].split()[0].lower()
392
- if typ in ("left", "right"):
393
- with lines.until(
394
- ("firstclass definition begin", "secondclass definition begin")
395
- ):
396
- return parsePair(lines, font)
397
- return parsePair(lines, font)
398
-
399
-
400
- def makeAnchor(data, klass=ot.Anchor):
401
- assert len(data) <= 2
402
- anchor = klass()
403
- anchor.Format = 1
404
- anchor.XCoordinate, anchor.YCoordinate = intSplitComma(data[0])
405
- if len(data) > 1 and data[1] != "":
406
- anchor.Format = 2
407
- anchor.AnchorPoint = int(data[1])
408
- return anchor
409
-
410
-
411
- def parseCursive(lines, font, _lookupMap=None):
412
- records = {}
413
- for line in lines:
414
- assert len(line) in [3, 4], line
415
- idx, klass = {
416
- "entry": (0, ot.EntryAnchor),
417
- "exit": (1, ot.ExitAnchor),
418
- }[line[0]]
419
- glyph = makeGlyph(line[1])
420
- if glyph not in records:
421
- records[glyph] = [None, None]
422
- assert records[glyph][idx] is None, (glyph, idx)
423
- records[glyph][idx] = makeAnchor(line[2:], klass)
424
- return otl.buildCursivePosSubtable(records, font.getReverseGlyphMap())
425
-
426
-
427
- def makeMarkRecords(data, coverage, c):
428
- records = []
429
- for glyph in coverage.glyphs:
430
- klass, anchor = data[glyph]
431
- record = c.MarkRecordClass()
432
- record.Class = klass
433
- setattr(record, c.MarkAnchor, anchor)
434
- records.append(record)
435
- return records
436
-
437
-
438
- def makeBaseRecords(data, coverage, c, classCount):
439
- records = []
440
- idx = {}
441
- for glyph in coverage.glyphs:
442
- idx[glyph] = len(records)
443
- record = c.BaseRecordClass()
444
- anchors = [None] * classCount
445
- setattr(record, c.BaseAnchor, anchors)
446
- records.append(record)
447
- for (glyph, klass), anchor in data.items():
448
- record = records[idx[glyph]]
449
- anchors = getattr(record, c.BaseAnchor)
450
- assert anchors[klass] is None, (glyph, klass)
451
- anchors[klass] = anchor
452
- return records
453
-
454
-
455
- def makeLigatureRecords(data, coverage, c, classCount):
456
- records = [None] * len(coverage.glyphs)
457
- idx = {g: i for i, g in enumerate(coverage.glyphs)}
458
-
459
- for (glyph, klass, compIdx, compCount), anchor in data.items():
460
- record = records[idx[glyph]]
461
- if record is None:
462
- record = records[idx[glyph]] = ot.LigatureAttach()
463
- record.ComponentCount = compCount
464
- record.ComponentRecord = [ot.ComponentRecord() for i in range(compCount)]
465
- for compRec in record.ComponentRecord:
466
- compRec.LigatureAnchor = [None] * classCount
467
- assert record.ComponentCount == compCount, (
468
- glyph,
469
- record.ComponentCount,
470
- compCount,
471
- )
472
-
473
- anchors = record.ComponentRecord[compIdx - 1].LigatureAnchor
474
- assert anchors[klass] is None, (glyph, compIdx, klass)
475
- anchors[klass] = anchor
476
- return records
477
-
478
-
479
- def parseMarkToSomething(lines, font, c):
480
- self = c.Type()
481
- self.Format = 1
482
- markData = {}
483
- baseData = {}
484
- Data = {
485
- "mark": (markData, c.MarkAnchorClass),
486
- "base": (baseData, c.BaseAnchorClass),
487
- "ligature": (baseData, c.BaseAnchorClass),
488
- }
489
- maxKlass = 0
490
- for line in lines:
491
- typ = line[0]
492
- assert typ in ("mark", "base", "ligature")
493
- glyph = makeGlyph(line[1])
494
- data, anchorClass = Data[typ]
495
- extraItems = 2 if typ == "ligature" else 0
496
- extras = tuple(int(i) for i in line[2 : 2 + extraItems])
497
- klass = int(line[2 + extraItems])
498
- anchor = makeAnchor(line[3 + extraItems :], anchorClass)
499
- if typ == "mark":
500
- key, value = glyph, (klass, anchor)
501
- else:
502
- key, value = ((glyph, klass) + extras), anchor
503
- assert key not in data, key
504
- data[key] = value
505
- maxKlass = max(maxKlass, klass)
506
-
507
- # Mark
508
- markCoverage = makeCoverage(set(markData.keys()), font, c.MarkCoverageClass)
509
- markArray = c.MarkArrayClass()
510
- markRecords = makeMarkRecords(markData, markCoverage, c)
511
- setattr(markArray, c.MarkRecord, markRecords)
512
- setattr(markArray, c.MarkCount, len(markRecords))
513
- setattr(self, c.MarkCoverage, markCoverage)
514
- setattr(self, c.MarkArray, markArray)
515
- self.ClassCount = maxKlass + 1
516
-
517
- # Base
518
- self.classCount = 0 if not baseData else 1 + max(k[1] for k, v in baseData.items())
519
- baseCoverage = makeCoverage(
520
- set([k[0] for k in baseData.keys()]), font, c.BaseCoverageClass
521
- )
522
- baseArray = c.BaseArrayClass()
523
- if c.Base == "Ligature":
524
- baseRecords = makeLigatureRecords(baseData, baseCoverage, c, self.classCount)
525
- else:
526
- baseRecords = makeBaseRecords(baseData, baseCoverage, c, self.classCount)
527
- setattr(baseArray, c.BaseRecord, baseRecords)
528
- setattr(baseArray, c.BaseCount, len(baseRecords))
529
- setattr(self, c.BaseCoverage, baseCoverage)
530
- setattr(self, c.BaseArray, baseArray)
531
-
532
- return self
533
-
534
-
535
- class MarkHelper(object):
536
- def __init__(self):
537
- for Which in ("Mark", "Base"):
538
- for What in ("Coverage", "Array", "Count", "Record", "Anchor"):
539
- key = Which + What
540
- if Which == "Mark" and What in ("Count", "Record", "Anchor"):
541
- value = key
542
- else:
543
- value = getattr(self, Which) + What
544
- if value == "LigatureRecord":
545
- value = "LigatureAttach"
546
- setattr(self, key, value)
547
- if What != "Count":
548
- klass = getattr(ot, value)
549
- setattr(self, key + "Class", klass)
550
-
551
-
552
- class MarkToBaseHelper(MarkHelper):
553
- Mark = "Mark"
554
- Base = "Base"
555
- Type = ot.MarkBasePos
556
-
557
-
558
- class MarkToMarkHelper(MarkHelper):
559
- Mark = "Mark1"
560
- Base = "Mark2"
561
- Type = ot.MarkMarkPos
562
-
563
-
564
- class MarkToLigatureHelper(MarkHelper):
565
- Mark = "Mark"
566
- Base = "Ligature"
567
- Type = ot.MarkLigPos
568
-
569
-
570
- def parseMarkToBase(lines, font, _lookupMap=None):
571
- return parseMarkToSomething(lines, font, MarkToBaseHelper())
572
-
573
-
574
- def parseMarkToMark(lines, font, _lookupMap=None):
575
- return parseMarkToSomething(lines, font, MarkToMarkHelper())
576
-
577
-
578
- def parseMarkToLigature(lines, font, _lookupMap=None):
579
- return parseMarkToSomething(lines, font, MarkToLigatureHelper())
580
-
581
-
582
- def stripSplitComma(line):
583
- return [s.strip() for s in line.split(",")] if line else []
584
-
585
-
586
- def intSplitComma(line):
587
- return [int(i) for i in line.split(",")] if line else []
588
-
589
-
590
- # Copied from fontTools.subset
591
- class ContextHelper(object):
592
- def __init__(self, klassName, Format):
593
- if klassName.endswith("Subst"):
594
- Typ = "Sub"
595
- Type = "Subst"
596
- else:
597
- Typ = "Pos"
598
- Type = "Pos"
599
- if klassName.startswith("Chain"):
600
- Chain = "Chain"
601
- InputIdx = 1
602
- DataLen = 3
603
- else:
604
- Chain = ""
605
- InputIdx = 0
606
- DataLen = 1
607
- ChainTyp = Chain + Typ
608
-
609
- self.Typ = Typ
610
- self.Type = Type
611
- self.Chain = Chain
612
- self.ChainTyp = ChainTyp
613
- self.InputIdx = InputIdx
614
- self.DataLen = DataLen
615
-
616
- self.LookupRecord = Type + "LookupRecord"
617
-
618
- if Format == 1:
619
- Coverage = lambda r: r.Coverage
620
- ChainCoverage = lambda r: r.Coverage
621
- ContextData = lambda r: (None,)
622
- ChainContextData = lambda r: (None, None, None)
623
- SetContextData = None
624
- SetChainContextData = None
625
- RuleData = lambda r: (r.Input,)
626
- ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead)
627
-
628
- def SetRuleData(r, d):
629
- (r.Input,) = d
630
- (r.GlyphCount,) = (len(x) + 1 for x in d)
631
-
632
- def ChainSetRuleData(r, d):
633
- (r.Backtrack, r.Input, r.LookAhead) = d
634
- (
635
- r.BacktrackGlyphCount,
636
- r.InputGlyphCount,
637
- r.LookAheadGlyphCount,
638
- ) = (len(d[0]), len(d[1]) + 1, len(d[2]))
639
-
640
- elif Format == 2:
641
- Coverage = lambda r: r.Coverage
642
- ChainCoverage = lambda r: r.Coverage
643
- ContextData = lambda r: (r.ClassDef,)
644
- ChainContextData = lambda r: (
645
- r.BacktrackClassDef,
646
- r.InputClassDef,
647
- r.LookAheadClassDef,
648
- )
649
-
650
- def SetContextData(r, d):
651
- (r.ClassDef,) = d
652
-
653
- def SetChainContextData(r, d):
654
- (r.BacktrackClassDef, r.InputClassDef, r.LookAheadClassDef) = d
655
-
656
- RuleData = lambda r: (r.Class,)
657
- ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead)
658
-
659
- def SetRuleData(r, d):
660
- (r.Class,) = d
661
- (r.GlyphCount,) = (len(x) + 1 for x in d)
662
-
663
- def ChainSetRuleData(r, d):
664
- (r.Backtrack, r.Input, r.LookAhead) = d
665
- (
666
- r.BacktrackGlyphCount,
667
- r.InputGlyphCount,
668
- r.LookAheadGlyphCount,
669
- ) = (len(d[0]), len(d[1]) + 1, len(d[2]))
670
-
671
- elif Format == 3:
672
- Coverage = lambda r: r.Coverage[0]
673
- ChainCoverage = lambda r: r.InputCoverage[0]
674
- ContextData = None
675
- ChainContextData = None
676
- SetContextData = None
677
- SetChainContextData = None
678
- RuleData = lambda r: r.Coverage
679
- ChainRuleData = lambda r: (
680
- r.BacktrackCoverage + r.InputCoverage + r.LookAheadCoverage
681
- )
682
-
683
- def SetRuleData(r, d):
684
- (r.Coverage,) = d
685
- (r.GlyphCount,) = (len(x) for x in d)
686
-
687
- def ChainSetRuleData(r, d):
688
- (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d
689
- (
690
- r.BacktrackGlyphCount,
691
- r.InputGlyphCount,
692
- r.LookAheadGlyphCount,
693
- ) = (len(x) for x in d)
694
-
695
- else:
696
- assert 0, "unknown format: %s" % Format
697
-
698
- if Chain:
699
- self.Coverage = ChainCoverage
700
- self.ContextData = ChainContextData
701
- self.SetContextData = SetChainContextData
702
- self.RuleData = ChainRuleData
703
- self.SetRuleData = ChainSetRuleData
704
- else:
705
- self.Coverage = Coverage
706
- self.ContextData = ContextData
707
- self.SetContextData = SetContextData
708
- self.RuleData = RuleData
709
- self.SetRuleData = SetRuleData
710
-
711
- if Format == 1:
712
- self.Rule = ChainTyp + "Rule"
713
- self.RuleCount = ChainTyp + "RuleCount"
714
- self.RuleSet = ChainTyp + "RuleSet"
715
- self.RuleSetCount = ChainTyp + "RuleSetCount"
716
- self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
717
- elif Format == 2:
718
- self.Rule = ChainTyp + "ClassRule"
719
- self.RuleCount = ChainTyp + "ClassRuleCount"
720
- self.RuleSet = ChainTyp + "ClassSet"
721
- self.RuleSetCount = ChainTyp + "ClassSetCount"
722
- self.Intersect = lambda glyphs, c, r: (
723
- c.intersect_class(glyphs, r)
724
- if c
725
- else (set(glyphs) if r == 0 else set())
726
- )
727
-
728
- self.ClassDef = "InputClassDef" if Chain else "ClassDef"
729
- self.ClassDefIndex = 1 if Chain else 0
730
- self.Input = "Input" if Chain else "Class"
731
-
732
-
733
- def parseLookupRecords(items, klassName, lookupMap=None):
734
- klass = getattr(ot, klassName)
735
- lst = []
736
- for item in items:
737
- rec = klass()
738
- item = stripSplitComma(item)
739
- assert len(item) == 2, item
740
- idx = int(item[0])
741
- assert idx > 0, idx
742
- rec.SequenceIndex = idx - 1
743
- setReference(mapLookup, lookupMap, item[1], setattr, rec, "LookupListIndex")
744
- lst.append(rec)
745
- return lst
746
-
747
-
748
- def makeClassDef(classDefs, font, klass=ot.Coverage):
749
- if not classDefs:
750
- return None
751
- self = klass()
752
- self.classDefs = dict(classDefs)
753
- return self
754
-
755
-
756
- def parseClassDef(lines, font, klass=ot.ClassDef):
757
- classDefs = {}
758
- with lines.between("class definition"):
759
- for line in lines:
760
- glyph = makeGlyph(line[0])
761
- assert glyph not in classDefs, glyph
762
- classDefs[glyph] = int(line[1])
763
- return makeClassDef(classDefs, font, klass)
764
-
765
-
766
- def makeCoverage(glyphs, font, klass=ot.Coverage):
767
- if not glyphs:
768
- return None
769
- if isinstance(glyphs, set):
770
- glyphs = sorted(glyphs)
771
- coverage = klass()
772
- coverage.glyphs = sorted(set(glyphs), key=font.getGlyphID)
773
- return coverage
774
-
775
-
776
- def parseCoverage(lines, font, klass=ot.Coverage):
777
- glyphs = []
778
- with lines.between("coverage definition"):
779
- for line in lines:
780
- glyphs.append(makeGlyph(line[0]))
781
- return makeCoverage(glyphs, font, klass)
782
-
783
-
784
- def bucketizeRules(self, c, rules, bucketKeys):
785
- buckets = {}
786
- for seq, recs in rules:
787
- buckets.setdefault(seq[c.InputIdx][0], []).append(
788
- (tuple(s[1 if i == c.InputIdx else 0 :] for i, s in enumerate(seq)), recs)
789
- )
790
-
791
- rulesets = []
792
- for firstGlyph in bucketKeys:
793
- if firstGlyph not in buckets:
794
- rulesets.append(None)
795
- continue
796
- thisRules = []
797
- for seq, recs in buckets[firstGlyph]:
798
- rule = getattr(ot, c.Rule)()
799
- c.SetRuleData(rule, seq)
800
- setattr(rule, c.Type + "Count", len(recs))
801
- setattr(rule, c.LookupRecord, recs)
802
- thisRules.append(rule)
803
-
804
- ruleset = getattr(ot, c.RuleSet)()
805
- setattr(ruleset, c.Rule, thisRules)
806
- setattr(ruleset, c.RuleCount, len(thisRules))
807
- rulesets.append(ruleset)
808
-
809
- setattr(self, c.RuleSet, rulesets)
810
- setattr(self, c.RuleSetCount, len(rulesets))
811
-
812
-
813
- def parseContext(lines, font, Type, lookupMap=None):
814
- self = getattr(ot, Type)()
815
- typ = lines.peeks()[0].split()[0].lower()
816
- if typ == "glyph":
817
- self.Format = 1
818
- log.debug("Parsing %s format %s", Type, self.Format)
819
- c = ContextHelper(Type, self.Format)
820
- rules = []
821
- for line in lines:
822
- assert line[0].lower() == "glyph", line[0]
823
- while len(line) < 1 + c.DataLen:
824
- line.append("")
825
- seq = tuple(makeGlyphs(stripSplitComma(i)) for i in line[1 : 1 + c.DataLen])
826
- recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap)
827
- rules.append((seq, recs))
828
-
829
- firstGlyphs = set(seq[c.InputIdx][0] for seq, recs in rules)
830
- self.Coverage = makeCoverage(firstGlyphs, font)
831
- bucketizeRules(self, c, rules, self.Coverage.glyphs)
832
- elif typ.endswith("class"):
833
- self.Format = 2
834
- log.debug("Parsing %s format %s", Type, self.Format)
835
- c = ContextHelper(Type, self.Format)
836
- classDefs = [None] * c.DataLen
837
- while lines.peeks()[0].endswith("class definition begin"):
838
- typ = lines.peek()[0][: -len("class definition begin")].lower()
839
- idx, klass = {
840
- 1: {
841
- "": (0, ot.ClassDef),
842
- },
843
- 3: {
844
- "backtrack": (0, ot.BacktrackClassDef),
845
- "": (1, ot.InputClassDef),
846
- "lookahead": (2, ot.LookAheadClassDef),
847
- },
848
- }[c.DataLen][typ]
849
- assert classDefs[idx] is None, idx
850
- classDefs[idx] = parseClassDef(lines, font, klass=klass)
851
- c.SetContextData(self, classDefs)
852
- rules = []
853
- for line in lines:
854
- assert line[0].lower().startswith("class"), line[0]
855
- while len(line) < 1 + c.DataLen:
856
- line.append("")
857
- seq = tuple(intSplitComma(i) for i in line[1 : 1 + c.DataLen])
858
- recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap)
859
- rules.append((seq, recs))
860
- firstClasses = set(seq[c.InputIdx][0] for seq, recs in rules)
861
- firstGlyphs = set(
862
- g for g, c in classDefs[c.InputIdx].classDefs.items() if c in firstClasses
863
- )
864
- self.Coverage = makeCoverage(firstGlyphs, font)
865
- bucketizeRules(self, c, rules, range(max(firstClasses) + 1))
866
- elif typ.endswith("coverage"):
867
- self.Format = 3
868
- log.debug("Parsing %s format %s", Type, self.Format)
869
- c = ContextHelper(Type, self.Format)
870
- coverages = tuple([] for i in range(c.DataLen))
871
- while lines.peeks()[0].endswith("coverage definition begin"):
872
- typ = lines.peek()[0][: -len("coverage definition begin")].lower()
873
- idx, klass = {
874
- 1: {
875
- "": (0, ot.Coverage),
876
- },
877
- 3: {
878
- "backtrack": (0, ot.BacktrackCoverage),
879
- "input": (1, ot.InputCoverage),
880
- "lookahead": (2, ot.LookAheadCoverage),
881
- },
882
- }[c.DataLen][typ]
883
- coverages[idx].append(parseCoverage(lines, font, klass=klass))
884
- c.SetRuleData(self, coverages)
885
- lines = list(lines)
886
- assert len(lines) == 1
887
- line = lines[0]
888
- assert line[0].lower() == "coverage", line[0]
889
- recs = parseLookupRecords(line[1:], c.LookupRecord, lookupMap)
890
- setattr(self, c.Type + "Count", len(recs))
891
- setattr(self, c.LookupRecord, recs)
892
- else:
893
- assert 0, typ
894
- return self
895
-
896
-
897
- def parseContextSubst(lines, font, lookupMap=None):
898
- return parseContext(lines, font, "ContextSubst", lookupMap=lookupMap)
899
-
900
-
901
- def parseContextPos(lines, font, lookupMap=None):
902
- return parseContext(lines, font, "ContextPos", lookupMap=lookupMap)
903
-
904
-
905
- def parseChainedSubst(lines, font, lookupMap=None):
906
- return parseContext(lines, font, "ChainContextSubst", lookupMap=lookupMap)
907
-
908
-
909
- def parseChainedPos(lines, font, lookupMap=None):
910
- return parseContext(lines, font, "ChainContextPos", lookupMap=lookupMap)
911
-
912
-
913
- def parseReverseChainedSubst(lines, font, _lookupMap=None):
914
- self = ot.ReverseChainSingleSubst()
915
- self.Format = 1
916
- coverages = ([], [])
917
- while lines.peeks()[0].endswith("coverage definition begin"):
918
- typ = lines.peek()[0][: -len("coverage definition begin")].lower()
919
- idx, klass = {
920
- "backtrack": (0, ot.BacktrackCoverage),
921
- "lookahead": (1, ot.LookAheadCoverage),
922
- }[typ]
923
- coverages[idx].append(parseCoverage(lines, font, klass=klass))
924
- self.BacktrackCoverage = coverages[0]
925
- self.BacktrackGlyphCount = len(self.BacktrackCoverage)
926
- self.LookAheadCoverage = coverages[1]
927
- self.LookAheadGlyphCount = len(self.LookAheadCoverage)
928
- mapping = {}
929
- for line in lines:
930
- assert len(line) == 2, line
931
- line = makeGlyphs(line)
932
- mapping[line[0]] = line[1]
933
- self.Coverage = makeCoverage(set(mapping.keys()), font)
934
- self.Substitute = [mapping[k] for k in self.Coverage.glyphs]
935
- self.GlyphCount = len(self.Substitute)
936
- return self
937
-
938
-
939
- def parseLookup(lines, tableTag, font, lookupMap=None):
940
- line = lines.expect("lookup")
941
- _, name, typ = line
942
- log.debug("Parsing lookup type %s %s", typ, name)
943
- lookup = ot.Lookup()
944
- lookup.LookupFlag, filterset = parseLookupFlags(lines)
945
- if filterset is not None:
946
- lookup.MarkFilteringSet = filterset
947
- lookup.LookupType, parseLookupSubTable = {
948
- "GSUB": {
949
- "single": (1, parseSingleSubst),
950
- "multiple": (2, parseMultiple),
951
- "alternate": (3, parseAlternate),
952
- "ligature": (4, parseLigature),
953
- "context": (5, parseContextSubst),
954
- "chained": (6, parseChainedSubst),
955
- "reversechained": (8, parseReverseChainedSubst),
956
- },
957
- "GPOS": {
958
- "single": (1, parseSinglePos),
959
- "pair": (2, parsePair),
960
- "kernset": (2, parseKernset),
961
- "cursive": (3, parseCursive),
962
- "mark to base": (4, parseMarkToBase),
963
- "mark to ligature": (5, parseMarkToLigature),
964
- "mark to mark": (6, parseMarkToMark),
965
- "context": (7, parseContextPos),
966
- "chained": (8, parseChainedPos),
967
- },
968
- }[tableTag][typ]
969
-
970
- with lines.until("lookup end"):
971
- subtables = []
972
-
973
- while lines.peek():
974
- with lines.until(("% subtable", "subtable end")):
975
- while lines.peek():
976
- subtable = parseLookupSubTable(lines, font, lookupMap)
977
- assert lookup.LookupType == subtable.LookupType
978
- subtables.append(subtable)
979
- if lines.peeks()[0] in ("% subtable", "subtable end"):
980
- next(lines)
981
- lines.expect("lookup end")
982
-
983
- lookup.SubTable = subtables
984
- lookup.SubTableCount = len(lookup.SubTable)
985
- if lookup.SubTableCount == 0:
986
- # Remove this return when following is fixed:
987
- # https://github.com/fonttools/fonttools/issues/789
988
- return None
989
- return lookup
990
-
991
-
992
- def parseGSUBGPOS(lines, font, tableTag):
993
- container = ttLib.getTableClass(tableTag)()
994
- lookupMap = DeferredMapping()
995
- featureMap = DeferredMapping()
996
- assert tableTag in ("GSUB", "GPOS")
997
- log.debug("Parsing %s", tableTag)
998
- self = getattr(ot, tableTag)()
999
- self.Version = 0x00010000
1000
- fields = {
1001
- "script table begin": (
1002
- "ScriptList",
1003
- lambda lines: parseScriptList(lines, featureMap),
1004
- ),
1005
- "feature table begin": (
1006
- "FeatureList",
1007
- lambda lines: parseFeatureList(lines, lookupMap, featureMap),
1008
- ),
1009
- "lookup": ("LookupList", None),
1010
- }
1011
- for attr, parser in fields.values():
1012
- setattr(self, attr, None)
1013
- while lines.peek() is not None:
1014
- typ = lines.peek()[0].lower()
1015
- if typ not in fields:
1016
- log.debug("Skipping %s", lines.peek())
1017
- next(lines)
1018
- continue
1019
- attr, parser = fields[typ]
1020
- if typ == "lookup":
1021
- if self.LookupList is None:
1022
- self.LookupList = ot.LookupList()
1023
- self.LookupList.Lookup = []
1024
- _, name, _ = lines.peek()
1025
- lookup = parseLookup(lines, tableTag, font, lookupMap)
1026
- if lookupMap is not None:
1027
- assert name not in lookupMap, "Duplicate lookup name: %s" % name
1028
- lookupMap[name] = len(self.LookupList.Lookup)
1029
- else:
1030
- assert int(name) == len(self.LookupList.Lookup), "%d %d" % (
1031
- name,
1032
- len(self.Lookup),
1033
- )
1034
- self.LookupList.Lookup.append(lookup)
1035
- else:
1036
- assert getattr(self, attr) is None, attr
1037
- setattr(self, attr, parser(lines))
1038
- if self.LookupList:
1039
- self.LookupList.LookupCount = len(self.LookupList.Lookup)
1040
- if lookupMap is not None:
1041
- lookupMap.applyDeferredMappings()
1042
- if os.environ.get(LOOKUP_DEBUG_ENV_VAR):
1043
- if "Debg" not in font:
1044
- font["Debg"] = newTable("Debg")
1045
- font["Debg"].data = {}
1046
- debug = (
1047
- font["Debg"]
1048
- .data.setdefault(LOOKUP_DEBUG_INFO_KEY, {})
1049
- .setdefault(tableTag, {})
1050
- )
1051
- for name, lookup in lookupMap.items():
1052
- debug[str(lookup)] = ["", name, ""]
1053
-
1054
- featureMap.applyDeferredMappings()
1055
- container.table = self
1056
- return container
1057
-
1058
-
1059
- def parseGSUB(lines, font):
1060
- return parseGSUBGPOS(lines, font, "GSUB")
1061
-
1062
-
1063
- def parseGPOS(lines, font):
1064
- return parseGSUBGPOS(lines, font, "GPOS")
1065
-
1066
-
1067
- def parseAttachList(lines, font):
1068
- points = {}
1069
- with lines.between("attachment list"):
1070
- for line in lines:
1071
- glyph = makeGlyph(line[0])
1072
- assert glyph not in points, glyph
1073
- points[glyph] = [int(i) for i in line[1:]]
1074
- return otl.buildAttachList(points, font.getReverseGlyphMap())
1075
-
1076
-
1077
- def parseCaretList(lines, font):
1078
- carets = {}
1079
- with lines.between("carets"):
1080
- for line in lines:
1081
- glyph = makeGlyph(line[0])
1082
- assert glyph not in carets, glyph
1083
- num = int(line[1])
1084
- thisCarets = [int(i) for i in line[2:]]
1085
- assert num == len(thisCarets), line
1086
- carets[glyph] = thisCarets
1087
- return otl.buildLigCaretList(carets, {}, font.getReverseGlyphMap())
1088
-
1089
-
1090
- def makeMarkFilteringSets(sets, font):
1091
- self = ot.MarkGlyphSetsDef()
1092
- self.MarkSetTableFormat = 1
1093
- self.MarkSetCount = 1 + max(sets.keys())
1094
- self.Coverage = [None] * self.MarkSetCount
1095
- for k, v in sorted(sets.items()):
1096
- self.Coverage[k] = makeCoverage(set(v), font)
1097
- return self
1098
-
1099
-
1100
- def parseMarkFilteringSets(lines, font):
1101
- sets = {}
1102
- with lines.between("set definition"):
1103
- for line in lines:
1104
- assert len(line) == 2, line
1105
- glyph = makeGlyph(line[0])
1106
- # TODO accept set names
1107
- st = int(line[1])
1108
- if st not in sets:
1109
- sets[st] = []
1110
- sets[st].append(glyph)
1111
- return makeMarkFilteringSets(sets, font)
1112
-
1113
-
1114
- def parseGDEF(lines, font):
1115
- container = ttLib.getTableClass("GDEF")()
1116
- log.debug("Parsing GDEF")
1117
- self = ot.GDEF()
1118
- fields = {
1119
- "class definition begin": (
1120
- "GlyphClassDef",
1121
- lambda lines, font: parseClassDef(lines, font, klass=ot.GlyphClassDef),
1122
- ),
1123
- "attachment list begin": ("AttachList", parseAttachList),
1124
- "carets begin": ("LigCaretList", parseCaretList),
1125
- "mark attachment class definition begin": (
1126
- "MarkAttachClassDef",
1127
- lambda lines, font: parseClassDef(lines, font, klass=ot.MarkAttachClassDef),
1128
- ),
1129
- "markfilter set definition begin": ("MarkGlyphSetsDef", parseMarkFilteringSets),
1130
- }
1131
- for attr, parser in fields.values():
1132
- setattr(self, attr, None)
1133
- while lines.peek() is not None:
1134
- typ = lines.peek()[0].lower()
1135
- if typ not in fields:
1136
- log.debug("Skipping %s", typ)
1137
- next(lines)
1138
- continue
1139
- attr, parser = fields[typ]
1140
- assert getattr(self, attr) is None, attr
1141
- setattr(self, attr, parser(lines, font))
1142
- self.Version = 0x00010000 if self.MarkGlyphSetsDef is None else 0x00010002
1143
- container.table = self
1144
- return container
1145
-
1146
-
1147
- def parseCmap(lines, font):
1148
- container = ttLib.getTableClass("cmap")()
1149
- log.debug("Parsing cmap")
1150
- tables = []
1151
- while lines.peek() is not None:
1152
- lines.expect("cmap subtable %d" % len(tables))
1153
- platId, encId, fmt, lang = [
1154
- parseCmapId(lines, field)
1155
- for field in ("platformID", "encodingID", "format", "language")
1156
- ]
1157
- table = cmap_classes[fmt](fmt)
1158
- table.platformID = platId
1159
- table.platEncID = encId
1160
- table.language = lang
1161
- table.cmap = {}
1162
- line = next(lines)
1163
- while line[0] != "end subtable":
1164
- table.cmap[int(line[0], 16)] = line[1]
1165
- line = next(lines)
1166
- tables.append(table)
1167
- container.tableVersion = 0
1168
- container.tables = tables
1169
- return container
1170
-
1171
-
1172
- def parseCmapId(lines, field):
1173
- line = next(lines)
1174
- assert field == line[0]
1175
- return int(line[1])
1176
-
1177
-
1178
- def parseTable(lines, font, tableTag=None):
1179
- log.debug("Parsing table")
1180
- line = lines.peeks()
1181
- tag = None
1182
- if line[0].split()[0] == "FontDame":
1183
- tag = line[0].split()[1]
1184
- elif " ".join(line[0].split()[:3]) == "Font Chef Table":
1185
- tag = line[0].split()[3]
1186
- if tag is not None:
1187
- next(lines)
1188
- tag = tag.ljust(4)
1189
- if tableTag is None:
1190
- tableTag = tag
1191
- else:
1192
- assert tableTag == tag, (tableTag, tag)
1193
-
1194
- assert (
1195
- tableTag is not None
1196
- ), "Don't know what table to parse and data doesn't specify"
1197
-
1198
- return {
1199
- "GSUB": parseGSUB,
1200
- "GPOS": parseGPOS,
1201
- "GDEF": parseGDEF,
1202
- "cmap": parseCmap,
1203
- }[tableTag](lines, font)
1204
-
1205
-
1206
- class Tokenizer(object):
1207
- def __init__(self, f):
1208
- # TODO BytesIO / StringIO as needed? also, figure out whether we work on bytes or unicode
1209
- lines = iter(f)
1210
- try:
1211
- self.filename = f.name
1212
- except:
1213
- self.filename = None
1214
- self.lines = iter(lines)
1215
- self.line = ""
1216
- self.lineno = 0
1217
- self.stoppers = []
1218
- self.buffer = None
1219
-
1220
- def __iter__(self):
1221
- return self
1222
-
1223
- def _next_line(self):
1224
- self.lineno += 1
1225
- line = self.line = next(self.lines)
1226
- line = [s.strip() for s in line.split("\t")]
1227
- if len(line) == 1 and not line[0]:
1228
- del line[0]
1229
- if line and not line[-1]:
1230
- log.warning("trailing tab found on line %d: %s" % (self.lineno, self.line))
1231
- while line and not line[-1]:
1232
- del line[-1]
1233
- return line
1234
-
1235
- def _next_nonempty(self):
1236
- while True:
1237
- line = self._next_line()
1238
- # Skip comments and empty lines
1239
- if line and line[0] and (line[0][0] != "%" or line[0] == "% subtable"):
1240
- return line
1241
-
1242
- def _next_buffered(self):
1243
- if self.buffer:
1244
- ret = self.buffer
1245
- self.buffer = None
1246
- return ret
1247
- else:
1248
- return self._next_nonempty()
1249
-
1250
- def __next__(self):
1251
- line = self._next_buffered()
1252
- if line[0].lower() in self.stoppers:
1253
- self.buffer = line
1254
- raise StopIteration
1255
- return line
1256
-
1257
- def next(self):
1258
- return self.__next__()
1259
-
1260
- def peek(self):
1261
- if not self.buffer:
1262
- try:
1263
- self.buffer = self._next_nonempty()
1264
- except StopIteration:
1265
- return None
1266
- if self.buffer[0].lower() in self.stoppers:
1267
- return None
1268
- return self.buffer
1269
-
1270
- def peeks(self):
1271
- ret = self.peek()
1272
- return ret if ret is not None else ("",)
1273
-
1274
- @contextmanager
1275
- def between(self, tag):
1276
- start = tag + " begin"
1277
- end = tag + " end"
1278
- self.expectendswith(start)
1279
- self.stoppers.append(end)
1280
- yield
1281
- del self.stoppers[-1]
1282
- self.expect(tag + " end")
1283
-
1284
- @contextmanager
1285
- def until(self, tags):
1286
- if type(tags) is not tuple:
1287
- tags = (tags,)
1288
- self.stoppers.extend(tags)
1289
- yield
1290
- del self.stoppers[-len(tags) :]
1291
-
1292
- def expect(self, s):
1293
- line = next(self)
1294
- tag = line[0].lower()
1295
- assert tag == s, "Expected '%s', got '%s'" % (s, tag)
1296
- return line
1297
-
1298
- def expectendswith(self, s):
1299
- line = next(self)
1300
- tag = line[0].lower()
1301
- assert tag.endswith(s), "Expected '*%s', got '%s'" % (s, tag)
1302
- return line
1303
-
1304
-
1305
- def build(f, font, tableTag=None):
1306
- """Convert a Monotype font layout file to an OpenType layout object
1307
-
1308
- A font object must be passed, but this may be a "dummy" font; it is only
1309
- used for sorting glyph sets when making coverage tables and to hold the
1310
- OpenType layout table while it is being built.
1311
-
1312
- Args:
1313
- f: A file object.
1314
- font (TTFont): A font object.
1315
- tableTag (string): If provided, asserts that the file contains data for the
1316
- given OpenType table.
1317
-
1318
- Returns:
1319
- An object representing the table. (e.g. ``table_G_S_U_B_``)
1320
- """
1321
- lines = Tokenizer(f)
1322
- return parseTable(lines, font, tableTag=tableTag)
1323
-
1324
-
1325
- def main(args=None, font=None):
1326
- """Convert a FontDame OTL file to TTX XML
1327
-
1328
- Writes XML output to stdout.
1329
-
1330
- Args:
1331
- args: Command line arguments (``--font``, ``--table``, input files).
1332
- """
1333
- import sys
1334
- from fontTools import configLogger
1335
- from fontTools.misc.testTools import MockFont
1336
-
1337
- if args is None:
1338
- args = sys.argv[1:]
1339
-
1340
- # configure the library logger (for >= WARNING)
1341
- configLogger()
1342
- # comment this out to enable debug messages from mtiLib's logger
1343
- # log.setLevel(logging.DEBUG)
1344
-
1345
- import argparse
1346
-
1347
- parser = argparse.ArgumentParser(
1348
- "fonttools mtiLib",
1349
- description=main.__doc__,
1350
- )
1351
-
1352
- parser.add_argument(
1353
- "--font",
1354
- "-f",
1355
- metavar="FILE",
1356
- dest="font",
1357
- help="Input TTF files (used for glyph classes and sorting coverage tables)",
1358
- )
1359
- parser.add_argument(
1360
- "--table",
1361
- "-t",
1362
- metavar="TABLE",
1363
- dest="tableTag",
1364
- help="Table to fill (sniffed from input file if not provided)",
1365
- )
1366
- parser.add_argument(
1367
- "inputs", metavar="FILE", type=str, nargs="+", help="Input FontDame .txt files"
1368
- )
1369
-
1370
- args = parser.parse_args(args)
1371
-
1372
- if font is None:
1373
- if args.font:
1374
- font = ttLib.TTFont(args.font)
1375
- else:
1376
- font = MockFont()
1377
-
1378
- for f in args.inputs:
1379
- log.debug("Processing %s", f)
1380
- with open(f, "rt", encoding="utf-8") as f:
1381
- table = build(f, font, tableTag=args.tableTag)
1382
- blob = table.compile(font) # Make sure it compiles
1383
- decompiled = table.__class__()
1384
- decompiled.decompile(blob, font) # Make sure it decompiles!
1385
-
1386
- # continue
1387
- from fontTools.misc import xmlWriter
1388
-
1389
- tag = table.tableTag
1390
- writer = xmlWriter.XMLWriter(sys.stdout)
1391
- writer.begintag(tag)
1392
- writer.newline()
1393
- # table.toXML(writer, font)
1394
- decompiled.toXML(writer, font)
1395
- writer.endtag(tag)
1396
- writer.newline()
1397
-
1398
-
1399
- if __name__ == "__main__":
1400
- import sys
1401
-
1402
- sys.exit(main())
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/serializing.py DELETED
@@ -1,548 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
- import os
5
- import uuid
6
- from pathlib import Path
7
- from typing import Any
8
-
9
- from gradio_client import media_data, utils
10
- from gradio_client.data_classes import FileData
11
-
12
- with open(Path(__file__).parent / "types.json") as f:
13
- serializer_types = json.load(f)
14
-
15
-
16
- class Serializable:
17
- def serialized_info(self):
18
- """
19
- The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].
20
- Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output
21
- """
22
- return self.api_info()
23
-
24
- def api_info(self) -> dict[str, list[str]]:
25
- """
26
- The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].
27
- Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output
28
- """
29
- raise NotImplementedError()
30
-
31
- def example_inputs(self) -> dict[str, Any]:
32
- """
33
- The example inputs for this component as a dictionary whose values are example inputs compatible with this component.
34
- Keys of the dictionary are: raw, serialized
35
- """
36
- raise NotImplementedError()
37
-
38
- # For backwards compatibility
39
- def input_api_info(self) -> tuple[str, str]:
40
- api_info = self.api_info()
41
- return (api_info["serialized_input"][0], api_info["serialized_input"][1])
42
-
43
- # For backwards compatibility
44
- def output_api_info(self) -> tuple[str, str]:
45
- api_info = self.api_info()
46
- return (api_info["serialized_output"][0], api_info["serialized_output"][1])
47
-
48
- def serialize(self, x: Any, load_dir: str | Path = ""):
49
- """
50
- Convert data from human-readable format to serialized format for a browser.
51
- """
52
- return x
53
-
54
- def deserialize(
55
- self,
56
- x: Any,
57
- save_dir: str | Path | None = None,
58
- root_url: str | None = None,
59
- hf_token: str | None = None,
60
- ):
61
- """
62
- Convert data from serialized format for a browser to human-readable format.
63
- """
64
- return x
65
-
66
-
67
- class SimpleSerializable(Serializable):
68
- """General class that does not perform any serialization or deserialization."""
69
-
70
- def api_info(self) -> dict[str, bool | dict]:
71
- return {
72
- "info": serializer_types["SimpleSerializable"],
73
- "serialized_info": False,
74
- }
75
-
76
- def example_inputs(self) -> dict[str, Any]:
77
- return {
78
- "raw": None,
79
- "serialized": None,
80
- }
81
-
82
-
83
- class StringSerializable(Serializable):
84
- """Expects a string as input/output but performs no serialization."""
85
-
86
- def api_info(self) -> dict[str, bool | dict]:
87
- return {
88
- "info": serializer_types["StringSerializable"],
89
- "serialized_info": False,
90
- }
91
-
92
- def example_inputs(self) -> dict[str, Any]:
93
- return {
94
- "raw": "Howdy!",
95
- "serialized": "Howdy!",
96
- }
97
-
98
-
99
- class ListStringSerializable(Serializable):
100
- """Expects a list of strings as input/output but performs no serialization."""
101
-
102
- def api_info(self) -> dict[str, bool | dict]:
103
- return {
104
- "info": serializer_types["ListStringSerializable"],
105
- "serialized_info": False,
106
- }
107
-
108
- def example_inputs(self) -> dict[str, Any]:
109
- return {
110
- "raw": ["Howdy!", "Merhaba"],
111
- "serialized": ["Howdy!", "Merhaba"],
112
- }
113
-
114
-
115
- class BooleanSerializable(Serializable):
116
- """Expects a boolean as input/output but performs no serialization."""
117
-
118
- def api_info(self) -> dict[str, bool | dict]:
119
- return {
120
- "info": serializer_types["BooleanSerializable"],
121
- "serialized_info": False,
122
- }
123
-
124
- def example_inputs(self) -> dict[str, Any]:
125
- return {
126
- "raw": True,
127
- "serialized": True,
128
- }
129
-
130
-
131
- class NumberSerializable(Serializable):
132
- """Expects a number (int/float) as input/output but performs no serialization."""
133
-
134
- def api_info(self) -> dict[str, bool | dict]:
135
- return {
136
- "info": serializer_types["NumberSerializable"],
137
- "serialized_info": False,
138
- }
139
-
140
- def example_inputs(self) -> dict[str, Any]:
141
- return {
142
- "raw": 5,
143
- "serialized": 5,
144
- }
145
-
146
-
147
- class ImgSerializable(Serializable):
148
- """Expects a base64 string as input/output which is serialized to a filepath."""
149
-
150
- def serialized_info(self):
151
- return {"type": "string", "description": "filepath or URL to image"}
152
-
153
- def api_info(self) -> dict[str, bool | dict]:
154
- return {"info": serializer_types["ImgSerializable"], "serialized_info": True}
155
-
156
- def example_inputs(self) -> dict[str, Any]:
157
- return {
158
- "raw": media_data.BASE64_IMAGE,
159
- "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",
160
- }
161
-
162
- def serialize(
163
- self,
164
- x: str | None,
165
- load_dir: str | Path = "",
166
- ) -> str | None:
167
- """
168
- Convert from human-friendly version of a file (string filepath) to a serialized
169
- representation (base64).
170
- Parameters:
171
- x: String path to file to serialize
172
- load_dir: Path to directory containing x
173
- """
174
- if not x:
175
- return None
176
- if utils.is_http_url_like(x):
177
- return utils.encode_url_to_base64(x)
178
- return utils.encode_file_to_base64(Path(load_dir) / x)
179
-
180
- def deserialize(
181
- self,
182
- x: str | None,
183
- save_dir: str | Path | None = None,
184
- root_url: str | None = None,
185
- hf_token: str | None = None,
186
- ) -> str | None:
187
- """
188
- Convert from serialized representation of a file (base64) to a human-friendly
189
- version (string filepath). Optionally, save the file to the directory specified by save_dir
190
- Parameters:
191
- x: Base64 representation of image to deserialize into a string filepath
192
- save_dir: Path to directory to save the deserialized image to
193
- root_url: Ignored
194
- hf_token: Ignored
195
- """
196
- if x is None or x == "":
197
- return None
198
- file = utils.decode_base64_to_file(x, dir=save_dir)
199
- return file.name
200
-
201
-
202
- class FileSerializable(Serializable):
203
- """Expects a dict with base64 representation of object as input/output which is serialized to a filepath."""
204
-
205
- def serialized_info(self):
206
- return self._single_file_serialized_info()
207
-
208
- def _single_file_api_info(self):
209
- return {
210
- "info": serializer_types["SingleFileSerializable"],
211
- "serialized_info": True,
212
- }
213
-
214
- def _single_file_serialized_info(self):
215
- return {"type": "string", "description": "filepath or URL to file"}
216
-
217
- def _multiple_file_serialized_info(self):
218
- return {
219
- "type": "array",
220
- "description": "List of filepath(s) or URL(s) to files",
221
- "items": {"type": "string", "description": "filepath or URL to file"},
222
- }
223
-
224
- def _multiple_file_api_info(self):
225
- return {
226
- "info": serializer_types["MultipleFileSerializable"],
227
- "serialized_info": True,
228
- }
229
-
230
- def api_info(self) -> dict[str, dict | bool]:
231
- return self._single_file_api_info()
232
-
233
- def example_inputs(self) -> dict[str, Any]:
234
- return self._single_file_example_inputs()
235
-
236
- def _single_file_example_inputs(self) -> dict[str, Any]:
237
- return {
238
- "raw": {"is_file": False, "data": media_data.BASE64_FILE},
239
- "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf",
240
- }
241
-
242
- def _multiple_file_example_inputs(self) -> dict[str, Any]:
243
- return {
244
- "raw": [{"is_file": False, "data": media_data.BASE64_FILE}],
245
- "serialized": [
246
- "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf"
247
- ],
248
- }
249
-
250
- def _serialize_single(
251
- self, x: str | FileData | None, load_dir: str | Path = ""
252
- ) -> FileData | None:
253
- if x is None or isinstance(x, dict):
254
- return x
255
- if utils.is_http_url_like(x):
256
- filename = x
257
- size = None
258
- else:
259
- filename = str(Path(load_dir) / x)
260
- size = Path(filename).stat().st_size
261
- return {
262
- "name": filename,
263
- "data": utils.encode_url_or_file_to_base64(filename),
264
- "orig_name": Path(filename).name,
265
- "is_file": False,
266
- "size": size,
267
- }
268
-
269
- def _deserialize_single(
270
- self,
271
- x: str | FileData | None,
272
- save_dir: str | None = None,
273
- root_url: str | None = None,
274
- hf_token: str | None = None,
275
- ) -> str | None:
276
- if x is None:
277
- return None
278
- if isinstance(x, str):
279
- file_name = utils.decode_base64_to_file(x, dir=save_dir).name
280
- elif isinstance(x, dict):
281
- if x.get("is_file"):
282
- filepath = x.get("name")
283
- assert filepath is not None, f"The 'name' field is missing in {x}"
284
- if root_url is not None:
285
- file_name = utils.download_tmp_copy_of_file(
286
- root_url + "file=" + filepath,
287
- hf_token=hf_token,
288
- dir=save_dir,
289
- )
290
- else:
291
- file_name = utils.create_tmp_copy_of_file(filepath, dir=save_dir)
292
- else:
293
- data = x.get("data")
294
- assert data is not None, f"The 'data' field is missing in {x}"
295
- file_name = utils.decode_base64_to_file(data, dir=save_dir).name
296
- else:
297
- raise ValueError(
298
- f"A FileSerializable component can only deserialize a string or a dict, not a {type(x)}: {x}"
299
- )
300
- return file_name
301
-
302
- def serialize(
303
- self,
304
- x: str | FileData | None | list[str | FileData | None],
305
- load_dir: str | Path = "",
306
- ) -> FileData | None | list[FileData | None]:
307
- """
308
- Convert from human-friendly version of a file (string filepath) to a
309
- serialized representation (base64)
310
- Parameters:
311
- x: String path to file to serialize
312
- load_dir: Path to directory containing x
313
- """
314
- if x is None or x == "":
315
- return None
316
- if isinstance(x, list):
317
- return [self._serialize_single(f, load_dir=load_dir) for f in x]
318
- else:
319
- return self._serialize_single(x, load_dir=load_dir)
320
-
321
- def deserialize(
322
- self,
323
- x: str | FileData | None | list[str | FileData | None],
324
- save_dir: Path | str | None = None,
325
- root_url: str | None = None,
326
- hf_token: str | None = None,
327
- ) -> str | None | list[str | None]:
328
- """
329
- Convert from serialized representation of a file (base64) to a human-friendly
330
- version (string filepath). Optionally, save the file to the directory specified by `save_dir`
331
- Parameters:
332
- x: Base64 representation of file to deserialize into a string filepath
333
- save_dir: Path to directory to save the deserialized file to
334
- root_url: If this component is loaded from an external Space, this is the URL of the Space.
335
- hf_token: If this component is loaded from an external private Space, this is the access token for the Space
336
- """
337
- if x is None:
338
- return None
339
- if isinstance(save_dir, Path):
340
- save_dir = str(save_dir)
341
- if isinstance(x, list):
342
- return [
343
- self._deserialize_single(
344
- f, save_dir=save_dir, root_url=root_url, hf_token=hf_token
345
- )
346
- for f in x
347
- ]
348
- else:
349
- return self._deserialize_single(
350
- x, save_dir=save_dir, root_url=root_url, hf_token=hf_token
351
- )
352
-
353
-
354
- class VideoSerializable(FileSerializable):
355
- def serialized_info(self):
356
- return {"type": "string", "description": "filepath or URL to video file"}
357
-
358
- def api_info(self) -> dict[str, dict | bool]:
359
- return {"info": serializer_types["FileSerializable"], "serialized_info": True}
360
-
361
- def example_inputs(self) -> dict[str, Any]:
362
- return {
363
- "raw": {"is_file": False, "data": media_data.BASE64_VIDEO},
364
- "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/video_sample.mp4",
365
- }
366
-
367
- def serialize(
368
- self, x: str | None, load_dir: str | Path = ""
369
- ) -> tuple[FileData | None, None]:
370
- return (super().serialize(x, load_dir), None) # type: ignore
371
-
372
- def deserialize(
373
- self,
374
- x: tuple[FileData | None, FileData | None] | None,
375
- save_dir: Path | str | None = None,
376
- root_url: str | None = None,
377
- hf_token: str | None = None,
378
- ) -> str | tuple[str | None, str | None] | None:
379
- """
380
- Convert from serialized representation of a file (base64) to a human-friendly
381
- version (string filepath). Optionally, save the file to the directory specified by `save_dir`
382
- """
383
- if isinstance(x, (tuple, list)):
384
- assert len(x) == 2, f"Expected tuple of length 2. Received: {x}"
385
- x_as_list = [x[0], x[1]]
386
- else:
387
- raise ValueError(f"Expected tuple of length 2. Received: {x}")
388
- deserialized_file = super().deserialize(x_as_list, save_dir, root_url, hf_token) # type: ignore
389
- if isinstance(deserialized_file, list):
390
- return deserialized_file[0] # ignore subtitles
391
-
392
-
393
- class JSONSerializable(Serializable):
394
- def serialized_info(self):
395
- return {"type": "string", "description": "filepath to JSON file"}
396
-
397
- def api_info(self) -> dict[str, dict | bool]:
398
- return {"info": serializer_types["JSONSerializable"], "serialized_info": True}
399
-
400
- def example_inputs(self) -> dict[str, Any]:
401
- return {
402
- "raw": {"a": 1, "b": 2},
403
- "serialized": None,
404
- }
405
-
406
- def serialize(
407
- self,
408
- x: str | None,
409
- load_dir: str | Path = "",
410
- ) -> dict | list | None:
411
- """
412
- Convert from a a human-friendly version (string path to json file) to a
413
- serialized representation (json string)
414
- Parameters:
415
- x: String path to json file to read to get json string
416
- load_dir: Path to directory containing x
417
- """
418
- if x is None or x == "":
419
- return None
420
- return utils.file_to_json(Path(load_dir) / x)
421
-
422
- def deserialize(
423
- self,
424
- x: str | dict | list,
425
- save_dir: str | Path | None = None,
426
- root_url: str | None = None,
427
- hf_token: str | None = None,
428
- ) -> str | None:
429
- """
430
- Convert from serialized representation (json string) to a human-friendly
431
- version (string path to json file). Optionally, save the file to the directory specified by `save_dir`
432
- Parameters:
433
- x: Json string
434
- save_dir: Path to save the deserialized json file to
435
- root_url: Ignored
436
- hf_token: Ignored
437
- """
438
- if x is None:
439
- return None
440
- return utils.dict_or_str_to_json_file(x, dir=save_dir).name
441
-
442
-
443
- class GallerySerializable(Serializable):
444
- def serialized_info(self):
445
- return {
446
- "type": "string",
447
- "description": "path to directory with images and a file associating images with captions called captions.json",
448
- }
449
-
450
- def api_info(self) -> dict[str, dict | bool]:
451
- return {
452
- "info": serializer_types["GallerySerializable"],
453
- "serialized_info": True,
454
- }
455
-
456
- def example_inputs(self) -> dict[str, Any]:
457
- return {
458
- "raw": [media_data.BASE64_IMAGE] * 2,
459
- "serialized": [
460
- "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",
461
- ]
462
- * 2,
463
- }
464
-
465
- def serialize(
466
- self, x: str | None, load_dir: str | Path = ""
467
- ) -> list[list[str | None]] | None:
468
- if x is None or x == "":
469
- return None
470
- files = []
471
- captions_file = Path(x) / "captions.json"
472
- with captions_file.open("r") as captions_json:
473
- captions = json.load(captions_json)
474
- for file_name, caption in captions.items():
475
- img = FileSerializable().serialize(file_name)
476
- files.append([img, caption])
477
- return files
478
-
479
- def deserialize(
480
- self,
481
- x: list[list[str | None]] | None,
482
- save_dir: str = "",
483
- root_url: str | None = None,
484
- hf_token: str | None = None,
485
- ) -> None | str:
486
- if x is None:
487
- return None
488
- gallery_path = Path(save_dir) / str(uuid.uuid4())
489
- gallery_path.mkdir(exist_ok=True, parents=True)
490
- captions = {}
491
- for img_data in x:
492
- if isinstance(img_data, (list, tuple)):
493
- img_data, caption = img_data
494
- else:
495
- caption = None
496
- name = FileSerializable().deserialize(
497
- img_data, gallery_path, root_url=root_url, hf_token=hf_token
498
- )
499
- captions[name] = caption
500
- captions_file = gallery_path / "captions.json"
501
- with captions_file.open("w") as captions_json:
502
- json.dump(captions, captions_json)
503
- return os.path.abspath(gallery_path)
504
-
505
-
506
- SERIALIZER_MAPPING = {}
507
- for cls in Serializable.__subclasses__():
508
- SERIALIZER_MAPPING[cls.__name__] = cls
509
- for subcls in cls.__subclasses__():
510
- SERIALIZER_MAPPING[subcls.__name__] = subcls
511
-
512
- SERIALIZER_MAPPING["Serializable"] = SimpleSerializable
513
- SERIALIZER_MAPPING["File"] = FileSerializable
514
- SERIALIZER_MAPPING["UploadButton"] = FileSerializable
515
-
516
- COMPONENT_MAPPING: dict[str, type] = {
517
- "textbox": StringSerializable,
518
- "number": NumberSerializable,
519
- "slider": NumberSerializable,
520
- "checkbox": BooleanSerializable,
521
- "checkboxgroup": ListStringSerializable,
522
- "radio": StringSerializable,
523
- "dropdown": SimpleSerializable,
524
- "image": ImgSerializable,
525
- "video": FileSerializable,
526
- "audio": FileSerializable,
527
- "file": FileSerializable,
528
- "dataframe": JSONSerializable,
529
- "timeseries": JSONSerializable,
530
- "state": SimpleSerializable,
531
- "button": StringSerializable,
532
- "uploadbutton": FileSerializable,
533
- "colorpicker": StringSerializable,
534
- "label": JSONSerializable,
535
- "highlightedtext": JSONSerializable,
536
- "json": JSONSerializable,
537
- "html": StringSerializable,
538
- "gallery": GallerySerializable,
539
- "chatbot": JSONSerializable,
540
- "model3d": FileSerializable,
541
- "plot": JSONSerializable,
542
- "barplot": JSONSerializable,
543
- "lineplot": JSONSerializable,
544
- "scatterplot": JSONSerializable,
545
- "markdown": StringSerializable,
546
- "code": StringSerializable,
547
- "annotatedimage": JSONSerializable,
548
- }
spaces/DShrimp/PoseMaker/src/model.py DELETED
@@ -1,219 +0,0 @@
-import torch
-from collections import OrderedDict
-
-import torch
-import torch.nn as nn
-
-def make_layers(block, no_relu_layers):
-    layers = []
-    for layer_name, v in block.items():
-        if 'pool' in layer_name:
-            layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
-                                 padding=v[2])
-            layers.append((layer_name, layer))
-        else:
-            conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
-                               kernel_size=v[2], stride=v[3],
-                               padding=v[4])
-            layers.append((layer_name, conv2d))
-            if layer_name not in no_relu_layers:
-                layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))
-
-    return nn.Sequential(OrderedDict(layers))
-
-class bodypose_model(nn.Module):
-    def __init__(self):
-        super(bodypose_model, self).__init__()
-
-        # these layers have no relu layer
-        no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\
-                          'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\
-                          'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\
-                          'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L2']
-        blocks = {}
-        block0 = OrderedDict([
-            ('conv1_1', [3, 64, 3, 1, 1]),
-            ('conv1_2', [64, 64, 3, 1, 1]),
-            ('pool1_stage1', [2, 2, 0]),
-            ('conv2_1', [64, 128, 3, 1, 1]),
-            ('conv2_2', [128, 128, 3, 1, 1]),
-            ('pool2_stage1', [2, 2, 0]),
-            ('conv3_1', [128, 256, 3, 1, 1]),
-            ('conv3_2', [256, 256, 3, 1, 1]),
-            ('conv3_3', [256, 256, 3, 1, 1]),
-            ('conv3_4', [256, 256, 3, 1, 1]),
-            ('pool3_stage1', [2, 2, 0]),
-            ('conv4_1', [256, 512, 3, 1, 1]),
-            ('conv4_2', [512, 512, 3, 1, 1]),
-            ('conv4_3_CPM', [512, 256, 3, 1, 1]),
-            ('conv4_4_CPM', [256, 128, 3, 1, 1])
-        ])
-
-
-        # Stage 1
-        block1_1 = OrderedDict([
-            ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
-            ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
-            ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
-            ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
-            ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])
-        ])
-
-        block1_2 = OrderedDict([
-            ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
-            ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
-            ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
-            ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
-            ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])
-        ])
-        blocks['block1_1'] = block1_1
-        blocks['block1_2'] = block1_2
-
-        self.model0 = make_layers(block0, no_relu_layers)
-
-        # Stages 2 - 6
-        for i in range(2, 7):
-            blocks['block%d_1' % i] = OrderedDict([
-                ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
-                ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
-                ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
-                ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
-                ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
-                ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
-                ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
-            ])
-
-            blocks['block%d_2' % i] = OrderedDict([
-                ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
-                ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
-                ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
-                ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
-                ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
-                ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
-                ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
-            ])
-
-        for k in blocks.keys():
-            blocks[k] = make_layers(blocks[k], no_relu_layers)
-
-        self.model1_1 = blocks['block1_1']
-        self.model2_1 = blocks['block2_1']
-        self.model3_1 = blocks['block3_1']
-        self.model4_1 = blocks['block4_1']
-        self.model5_1 = blocks['block5_1']
-        self.model6_1 = blocks['block6_1']
-
-        self.model1_2 = blocks['block1_2']
-        self.model2_2 = blocks['block2_2']
-        self.model3_2 = blocks['block3_2']
-        self.model4_2 = blocks['block4_2']
-        self.model5_2 = blocks['block5_2']
-        self.model6_2 = blocks['block6_2']
-
-
-    def forward(self, x):
-
-        out1 = self.model0(x)
-
-        out1_1 = self.model1_1(out1)
-        out1_2 = self.model1_2(out1)
-        out2 = torch.cat([out1_1, out1_2, out1], 1)
-
-        out2_1 = self.model2_1(out2)
-        out2_2 = self.model2_2(out2)
-        out3 = torch.cat([out2_1, out2_2, out1], 1)
-
-        out3_1 = self.model3_1(out3)
-        out3_2 = self.model3_2(out3)
-        out4 = torch.cat([out3_1, out3_2, out1], 1)
-
-        out4_1 = self.model4_1(out4)
-        out4_2 = self.model4_2(out4)
-        out5 = torch.cat([out4_1, out4_2, out1], 1)
-
-        out5_1 = self.model5_1(out5)
-        out5_2 = self.model5_2(out5)
-        out6 = torch.cat([out5_1, out5_2, out1], 1)
-
-        out6_1 = self.model6_1(out6)
-        out6_2 = self.model6_2(out6)
-
-        return out6_1, out6_2
-
-class handpose_model(nn.Module):
-    def __init__(self):
-        super(handpose_model, self).__init__()
-
-        # these layers have no relu layer
-        no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\
-                          'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
-        # stage 1
-        block1_0 = OrderedDict([
-            ('conv1_1', [3, 64, 3, 1, 1]),
-            ('conv1_2', [64, 64, 3, 1, 1]),
-            ('pool1_stage1', [2, 2, 0]),
-            ('conv2_1', [64, 128, 3, 1, 1]),
-            ('conv2_2', [128, 128, 3, 1, 1]),
-            ('pool2_stage1', [2, 2, 0]),
-            ('conv3_1', [128, 256, 3, 1, 1]),
-            ('conv3_2', [256, 256, 3, 1, 1]),
-            ('conv3_3', [256, 256, 3, 1, 1]),
-            ('conv3_4', [256, 256, 3, 1, 1]),
-            ('pool3_stage1', [2, 2, 0]),
-            ('conv4_1', [256, 512, 3, 1, 1]),
-            ('conv4_2', [512, 512, 3, 1, 1]),
-            ('conv4_3', [512, 512, 3, 1, 1]),
-            ('conv4_4', [512, 512, 3, 1, 1]),
-            ('conv5_1', [512, 512, 3, 1, 1]),
-            ('conv5_2', [512, 512, 3, 1, 1]),
-            ('conv5_3_CPM', [512, 128, 3, 1, 1])
-        ])
-
-        block1_1 = OrderedDict([
-            ('conv6_1_CPM', [128, 512, 1, 1, 0]),
-            ('conv6_2_CPM', [512, 22, 1, 1, 0])
-        ])
-
-        blocks = {}
-        blocks['block1_0'] = block1_0
-        blocks['block1_1'] = block1_1
-
-        # stage 2-6
-        for i in range(2, 7):
-            blocks['block%d' % i] = OrderedDict([
-                ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
-                ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
-                ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
-                ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
-                ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
-                ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
-                ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
-            ])
-
-        for k in blocks.keys():
-            blocks[k] = make_layers(blocks[k], no_relu_layers)
-
-        self.model1_0 = blocks['block1_0']
-        self.model1_1 = blocks['block1_1']
-        self.model2 = blocks['block2']
-        self.model3 = blocks['block3']
-        self.model4 = blocks['block4']
-        self.model5 = blocks['block5']
-        self.model6 = blocks['block6']
-
-    def forward(self, x):
-        out1_0 = self.model1_0(x)
-        out1_1 = self.model1_1(out1_0)
-        concat_stage2 = torch.cat([out1_1, out1_0], 1)
-        out_stage2 = self.model2(concat_stage2)
-        concat_stage3 = torch.cat([out_stage2, out1_0], 1)
-        out_stage3 = self.model3(concat_stage3)
-        concat_stage4 = torch.cat([out_stage3, out1_0], 1)
-        out_stage4 = self.model4(concat_stage4)
-        concat_stage5 = torch.cat([out_stage4, out1_0], 1)
-        out_stage5 = self.model5(concat_stage5)
-        concat_stage6 = torch.cat([out_stage5, out1_0], 1)
-        out_stage6 = self.model6(concat_stage6)
-        return out_stage6
-
-