parquet-converter committed
Commit 1fdc59f · 1 parent: f863f1f

Update parquet files (step 119 of 121)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chashme Baddoor marathi movie mp4 hd free download Laugh out loud with Ali Zafar Taapsee Pannu and Siddharth.md +0 -147
  2. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Mod Apk Drive Park and Customize 70 Vehicles with Everything Unlocked.md +0 -88
  3. spaces/1phancelerku/anime-remove-background/Download queue xbox series x How to see and prioritize your game and app installations.md +0 -170
  4. spaces/1phancelerku/anime-remove-background/Explore the possibilities of melon playground a sandbox game for iOS and Android that is easy to play and hard to put down.md +0 -91
  5. spaces/4Taps/SadTalker/src/audio2pose_models/discriminator.py +0 -76
  6. spaces/801artistry/RVC801/README.md +0 -10
  7. spaces/801artistry/RVC801/diffq/base.py +0 -262
  8. spaces/AIFILMS/generate_human_motion/pyrender/pyrender/platforms/egl.py +0 -219
  9. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/glow_modules.py +0 -362
  10. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/x_transformer.py +0 -641
  11. spaces/AIGText/GlyphControl/annotator/util.py +0 -38
  12. spaces/AIatUIUC/CodeLATS/executors/executor_types.py +0 -20
  13. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/crowdhuman/yolov5_s-v61_fast_8xb16-300e_crowdhuman.py +0 -47
  14. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/classroom.py +0 -47
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Custom.js +0 -18
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/SwipeCell.js +0 -26
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/Sides.js +0 -84
  18. spaces/Alpaca233/SadTalker/src/face3d/visualize.py +0 -48
  19. spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/python/dqn/policies.py +0 -237
  20. spaces/Andres99/Tune-A-Video-Training-UI/app_upload.py +0 -106
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/control_brightness.md +0 -45
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/inpaint.md +0 -76
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_vq_diffusion.py +0 -496
  24. spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py +0 -4
  25. spaces/Andy1621/uniformer_image_detection/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py +0 -124
  26. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/slio.py +0 -177
  27. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/lazy_wheel.py +0 -210
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/codingstatemachinedict.py +0 -19
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/spinner.py +0 -137
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/debug.py +0 -5
  31. spaces/Atualli/mediapipe-pose-estimation/README.md +0 -13
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py +0 -14
  33. spaces/Babelscape/mrebel-demo/app.py +0 -123
  34. spaces/Bambicita/rvc-models/app.py +0 -188
  35. spaces/Benson/text-generation/Examples/Descargar Gratis Brawl Stars.md +0 -71
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/requirements.py +0 -146
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/rule.py +0 -130
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/traceback.py +0 -756
  39. spaces/BlitzEsports/TextToImage/README.md +0 -10
  40. spaces/BlitzKriegM/argilla/README.md +0 -19
  41. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/evaluation/rotated_coco_evaluation.py +0 -203
  42. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/gqa/gqa_loader.py +0 -277
  43. spaces/CVPR/LIVE/pybind11/tests/test_numpy_dtypes.py +0 -312
  44. spaces/CVPR/LIVE/thrust/thrust/cmake/FindTBB.cmake +0 -440
  45. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/per_device_resource.h +0 -22
  46. spaces/CVPR/ml-talking-face/client_rest.py +0 -74
  47. spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/modeling/prompt_encoder.py +0 -214
  48. spaces/ChandraMohanNayal/AutoGPT/autogpt/agent/agent.py +0 -197
  49. spaces/CikeyQI/meme-api/meme_generator/memes/decent_kiss/__init__.py +0 -18
  50. spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/cppipc/prod_cons.h +0 -433
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chashme Baddoor marathi movie mp4 hd free download Laugh out loud with Ali Zafar Taapsee Pannu and Siddharth.md DELETED
@@ -1,147 +0,0 @@
1
- <br />
2
- <h1>Chashme Baddoor: A Hilarious Comedy Movie in Marathi</h1>
3
- <h2>Introduction</h2>
4
- <p>If you are looking for a fun and entertaining movie to watch with your friends or family, you should check out Chashme Baddoor. It is a Marathi comedy movie that was released in 2013. It is a remake of the 1981 Hindi movie of the same name, which was directed by Sai Paranjape and starred Farooq Shaikh, Deepti Naval, Rakesh Bedi, and Ravi Baswani.</p>
5
- <p>In this article, we will tell you everything you need to know about Chashme Baddoor, including what it is about, who are the main actors and characters, why you should watch it, and how to watch it online for free.</p>
6
- <h2>Chashme Baddoor marathi movie mp4 hd free download</h2><br /><p><b><b>Download File</b> &#9745; <a href="https://byltly.com/2uKwgN">https://byltly.com/2uKwgN</a></b></p><br /><br />
7
- <h2>What is Chashme Baddoor?</h2>
8
- <p>Chashme Baddoor is a comedy movie that revolves around three friends and roommates who are studying at Delhi University. Siddharth (played by Ali Zafar) is a studious and sincere guy who is preparing for his PhD. Omi (played by Divyendu Sharma) and Jai (played by Siddharth) are lazy and flirtatious guys who are always chasing girls and having fun.</p>
9
- <p>One day, they see a new girl in their neighborhood, Seema (played by Taapsee Pannu), who is a salesgirl for Chamko washing powder. Omi and Jai try to impress her with their tricks, but they fail miserably. Siddharth, on the other hand, falls in love with her at first sight and starts a relationship with her.</p>
10
- <p>When Omi and Jai find out about this, they feel jealous and betrayed by their friend. They decide to break up their relationship by lying to Seema that Siddharth is already married and has a child. They also lie to Siddharth that Seema is a notorious criminal who is wanted by the police.</p>
11
- <p>Will their plan succeed? Will Siddharth and Seema find out the truth? Will their friendship survive this test? You will have to watch the movie to find out.</p>
12
- <h2>Who are the main actors and characters?</h2>
13
- <p>The movie has a talented cast of actors who have done justice to their roles. Here are some of the main actors and characters in the movie:</p>
14
- <ul>
15
- <li>Ali Zafar as Siddharth: He is a Pakistani singer, actor, and model who has worked in several Bollywood movies such as Tere Bin Laden, Mere Brother Ki Dulhan, London Paris New York, and Total Siyapaa. He plays the role of Siddharth, the smart and sensible friend who falls in love with Seema.</li>
16
- <li>Taapsee Pannu as Seema: She is an Indian actress who has worked in Hindi, Tamil, Telugu, Malayalam, and Kannada movies. She has appeared in movies such as Pink, Naam Shabana, Judwaa 2, Badla, Mission Mangal, Thappad, and Haseen Dillruba. She plays the role of Seema, the bubbly and beautiful girl who works as a salesgirl for Chamko washing powder.</li>
17
- <li>Siddharth as Jai: He is an Indian actor who has worked in Tamil, Telugu, Hindi, and Malayalam movies. He has appeared in movies such as Rang De Basanti, Striker, Bommarillu, Aata Nade Veta Nade (Telugu), Jigarthanda (Tamil), Enakkul Oruvan (Tamil), Aranmanai 2 (Tamil), The House Next Door (Hindi), Aval (Tamil), Gruham (Telugu), Sivappu Manjal Pachai (Tamil), Takkar (Tamil), Maha Samudram (Telugu). He plays the role of Jai, the mischievous and cunning friend who tries to woo Seema.</li>
18
- <li>Divyendu Sharma as Omi: He is an Indian actor who has worked in Hindi movies such as Pyaar Ka Punchnama, Fukrey, Toilet: Ek Prem Katha, Batti Gul Meter Chalu, Badnaam Gali, Shukranu, Mirzapur, Bicchoo Ka Khel. He plays the role of Omi, the funny and poetic friend who also tries to impress Seema.</li>
19
- <li>Rishi Kapoor as Mr. Joseph Furtado: He was an Indian actor who was one of the most popular stars of Bollywood. He had worked in more than 150 movies such as Bobby, Amar Akbar Anthony, Karz, Prem Rog, Saagar, Chandni, Agneepath, Kapoor & Sons, Mulk, 102 Not Out. He played the role of Mr. Joseph Furtado, the owner of a cafe where Siddharth works part-time. He also has a crush on Miss Josephine (played by Juhi Chawla).</li>
20
- <li>Anupam Kher as Suryakant Paranjape: He is an Indian actor who has worked in more than 500 movies in Hindi and other languages. He has appeared in movies such as Saaransh, Ram Lakhan, Dilwale Dulhania Le Jayenge, Kuch Kuch Hota Hai, Bend It Like Beckham, A Wednesday!, Special 26, Baby, The Accidental Prime Minister. He plays a double role of Suryakant Paranjape, Seema's strict father who wants her to get married soon; and Lallan Miyan, a shopkeeper who sells cigarettes to Omi and Jai on credit.</li>
21
- </ul>
22
- <h2>Why you should watch Chashme Baddoor</h2>
23
- <p>There are many reasons why you should watch Chashme Baddoor. Here are some of them:</p>
24
- <h3>It is a remake of a classic comedy movie</h3>
25
- <p>The original Chashme Baddoor was released in 1981 and was directed by Sai Paranjape. It was one of the most successful comedy movies of that time and received critical acclaim for its witty script, realistic characters, and hilarious situations. It also had some memorable songs composed by Raj Kamal and sung by Yesudas, Hemlata, Shailendra Singh, Anand Kumar C., etc.</p>
26
- <p>Chashme Baddoor Marathi film HD online watch<br />
27
- Chashme Baddoor 2013 Marathi movie free download mp4<br />
28
- Watch Chashme Baddoor full movie HD Marathi<br />
29
- Download Chashme Baddoor Marathi comedy film in HD quality<br />
30
- Chashme Baddoor Marathi movie HD 1080p free download<br />
31
- How to watch Chashme Baddoor Marathi movie online for free<br />
32
- Chashme Baddoor Marathi film mp4 download link<br />
33
- Chashme Baddoor full movie in Marathi HD download<br />
34
- Chashme Baddoor Marathi movie free streaming HD<br />
35
- Download Chashme Baddoor Marathi movie HD 720p<br />
36
- Chashme Baddoor Marathi film download mp4 high quality<br />
37
- Watch Chashme Baddoor Marathi comedy movie HD online<br />
38
- Chashme Baddoor full movie Marathi HD free download<br />
39
- Chashme Baddoor Marathi movie HD watch online free<br />
40
- Download Chashme Baddoor 2013 Marathi film in HD mp4<br />
41
- Chashme Baddoor Marathi movie HD download link<br />
42
- Watch Chashme Baddoor full movie in Marathi HD online<br />
43
- Chashme Baddoor Marathi film free download mp4 HD<br />
44
- Chashme Baddoor Marathi comedy movie HD online watch<br />
45
- Download Chashme Baddoor full movie Marathi HD<br />
46
- Chashme Baddoor Marathi movie download mp4 HD quality<br />
47
- Watch Chashme Baddoor 2013 Marathi film HD online free<br />
48
- Chashme Baddoor full movie in Marathi free download mp4 HD<br />
49
- Chashme Baddoor Marathi film HD streaming online<br />
50
- Download Chashme Baddoor Marathi comedy movie in HD mp4<br />
51
- Chashme Baddoor Marathi movie free download HD 1080p<br />
52
- Watch Chashme Baddoor full movie HD in Marathi online<br />
53
- Chashme Baddoor Marathi film download link mp4 HD<br />
54
- Chashme Baddoor 2013 Marathi movie HD watch online<br />
55
- Download Chashme Baddoor full movie in Marathi mp4 HD<br />
56
- Chashme Baddoor Marathi movie download HD 720p free<br />
57
- Watch Chashme Baddoor Marathi film online for free in HD<br />
58
- Chashme Baddoor full movie Marathi download mp4 HD quality<br />
59
- Chashme Baddoor Marathi comedy film free streaming HD online<br />
60
- Download Chashme Baddoor 2013 Marathi movie in HD quality mp4<br />
61
- Watch Chashme Baddoor full movie online free in Marathi HD<br />
62
- Download link for Chashme Baddoor Marathi film mp4 HD <br />
63
- Watch online Chashme Baddoor 2013 Marathi comedy movie in HD <br />
64
- Free download of Chashme Baddoor full movie in Marathi mp4 HD <br />
65
- Online streaming of Chashme Baddoor Marathi film in HD quality <br />
66
- Download and watch Chashme Baddoor 2013 Marathi movie in HD mp4 <br />
67
- Watch and download Chashme Baddoor full movie in Marathi HD quality <br />
68
- Free online watch of Chashme Baddoor 2013 comedy film in Marathi <br />
69
- Free online download of Chashme Baddoor full comedy movie in Marathi <br />
70
- Watch or download Chashme Baddoor 2013 film in Marathi mp4 format</p>
71
- <p>The remake pays tribute to the original movie by retaining some of its scenes and dialogues. It also adds some new twists and turns to make it more contemporary and appealing to the modern audience. The remake also has some catchy songs composed by Sajid-Wajid and sung by Ali Zafar, Sonu Nigam, Shreya Ghoshal, Wajid Khan, Neuman Pinto, etc.</p>
72
- <h3>It has a lot of funny scenes and dialogues</h3>
73
- <p>The movie is full of comedy scenes that will make you laugh out loud. Some of them are:</p>
74
- <ul>
75
- <li>The scene where Omi recites his shayari (poetry) to impress Seema but ends up insulting her instead.</li>
76
- <li>The scene where Jai pretends to be a film director and tries to cast Seema in his movie but gets caught by her father.</li>
77
- <li>The scene where Omi and Jai lie to Seema about Siddharth's marriage and show her fake photos of his wife and child.</li>
78
- <li>The scene where Omi and Jai lie to Siddharth about Seema's criminal background and show him fake news clippings of her crimes.</li>
79
- <li>The scene where Omi Jai and Neha's grandmother plan to stage a fake kidnapping but end up getting kidnapped by real goons.</li>
80
- <li>The scene where Mr. Joseph Furtado tries to propose to Miss Josephine but gets interrupted by Lallan Miyan.</li>
81
- </ul>
82
- <p>The movie also has some witty dialogues that will make you chuckle. Some of them are:</p>
83
- <ul>
84
- <li>"Har ek friend kamina hota hai" (Every friend is a scoundrel) - The catchy song that describes the friendship of Omi, Jai, and Siddharth.</li>
85
- <li>"Dhichkyaaon doom doom" (Bang bang) - The romantic song that shows the chemistry between Siddharth and Seema.</li>
86
- <li>"Early to bed and early to rise, makes a man healthy, wealthy and wise" - The motto of Siddharth that annoys Omi and Jai.</li>
87
- <li>"Chamko chamko chamko, sabki pasand Chamko" (Chamko chamko chamko, everyone's favorite Chamko) - The jingle of the washing powder that Seema sells.</li>
88
- <li>"Tumhara naam kya hai Basanti?" (What is your name Basanti?) - The famous dialogue from Sholay that Jai uses to flirt with Seema.</li>
89
- </ul>
90
- <h3>It has a good message about friendship and love</h3>
91
- <p>The movie is not just a comedy, but also a heartwarming story about friendship and love. It shows how true friends stick together through thick and thin, and how they support each other in times of need. It also shows how love can overcome misunderstandings and obstacles, and how it can bring happiness and peace to one's life.</p>
92
- <p>The movie also has some emotional scenes that will touch your heart. Some of them are:</p>
93
- <ul>
94
- <li>The scene where Siddharth confronts Omi and Jai for lying to him and Seema, and breaks his friendship with them.</li>
95
- <li>The scene where Omi and Jai realize their mistake and apologize to Siddharth and Seema, and reunite with them.</li>
96
- <li>The scene where Seema's father accepts Siddharth as his son-in-law and blesses their marriage.</li>
97
- <li>The scene where Mr. Joseph Furtado finally proposes to Miss Josephine and they get married.</li>
98
- </ul>
99
- <h2>How to watch Chashme Baddoor online for free</h2>
100
- <p>If you are wondering how to watch Chashme Baddoor online for free, you have several options. Here are some of them:</p>
101
- <h3>Netflix</h3>
102
- <p>Netflix is one of the most popular streaming platforms in the world. It has a huge collection of movies and shows in different languages and genres. You can watch Chashme Baddoor on Netflix with a subscription. You can also get a free trial for 30 days if you are a new user.</p>
103
- <p>To watch Chashme Baddoor on Netflix, you need to follow these steps:</p>
104
- <ol>
105
- <li>Go to https://www.netflix.com/ and sign up for an account or log in if you already have one.</li>
106
- <li>Search for Chashme Baddoor in the search bar or browse through the categories.</li>
107
- <li>Click on the movie title and enjoy watching it.</li>
108
- </ol>
109
- <h3>Desi Cinemas</h3>
110
- <p>Desi Cinemas is a website that offers free streaming of Bollywood movies online. It has a good collection of movies in HD quality and with English subtitles. You can watch Chashme Baddoor on Desi Cinemas without any registration or subscription.</p>
111
- <p>To watch Chashme Baddoor on Desi Cinemas, you need to follow these steps:</p>
112
- <ol>
113
- <li>Go to https://desicinemas.tv/ and search for Chashme Baddoor in the search bar or browse through the categories.</li>
114
- <li>Click on the movie title and choose a server to stream it from.</li>
115
- <li>Enjoy watching the movie.</li>
116
- </ol>
117
- <h3>JustWatch</h3>
118
- <p>JustWatch is a website that helps you find where to watch movies and shows online. It shows you the availability and price of different streaming platforms for any movie or show you want to watch. You can use JustWatch to find out where to watch Chashme Baddoor online for free or for a low cost.</p>
119
- <p>To use JustWatch, you need to follow these steps:</p>
120
- <ol>
121
- <li>Go to https://www.justwatch.com/ and select your country from the menu.</li>
122
- <li>Search for Chashme Baddoor in the search bar or browse through the categories.</li>
123
- <li>Click on the movie title and see the list of streaming platforms where you can watch it.</li>
124
- <li>Select the platform that suits your preference and budget, and click on the link to go to its website.</li>
125
- <li>Watch the movie on the chosen platform.</li>
126
- </ol>
127
- <h2>Conclusion</h2>
128
- <p>In conclusion, Chashme Baddoor is a comedy movie that you should not miss. It is a remake of a classic movie that has been updated with new twists and turns. It has a talented cast of actors who have delivered hilarious performances. It has a lot of funny scenes and dialogues that will make you laugh out loud. It also has a good message about friendship and love that will touch your heart.</p>
129
- <p>If you want to watch Chashme Baddoor online for free, you have several options such as Netflix, Desi Cinemas, and JustWatch. You can choose any of them according to your convenience and preference. You can also download the movie from various websites if you want to watch it offline.</p>
130
- <p>So what are you waiting for? Grab your popcorn and your friends, and watch Chashme Baddoor online for free. You will surely have a great time watching this movie.</p>
131
- <h3>FAQs</h3>
132
- <p>Here are some frequently asked questions about Chashme Baddoor:</p>
133
- <ol>
134
- <li>Q: Is Chashme Baddoor a remake of a movie?<br>
135
- A: Yes, Chashme Baddoor is a remake of the 1981 Hindi movie of the same name, which was directed by Sai Paranjape and starred Farooq Shaikh, Deepti Naval, Rakesh Bedi, and Ravi Baswani.</li>
136
- <li>Q: Who are the main actors and characters in Chashme Baddoor?<br>
137
- A: The main actors and characters in Chashme Baddoor are Ali Zafar as Siddharth, Taapsee Pannu as Seema, Siddharth as Jai, Divyendu Sharma as Omi, Rishi Kapoor as Mr. Joseph Furtado, and Anupam Kher as Suryakant Paranjape and Lallan Miyan.</li>
138
- <li>Q: What are some of the songs in Chashme Baddoor?<br>
139
- A: Some of the songs in Chashme Baddoor are "Har ek friend kamina hota hai", "Dhichkyaaon doom doom", "Early to bed and early to rise", and "Chamko chamko chamko".</li>
140
- <li>Q: Where can I watch Chashme Baddoor online for free?<br>
141
- A: You can watch Chashme Baddoor online for free on Netflix, Desi Cinemas, or JustWatch. You can also download the movie from various websites if you want to watch it offline.</li>
142
- <li>Q: What is the message of Chashme Baddoor?<br>
143
- A: The message of Chashme Baddoor is that friendship and love are precious and should not be ruined by jealousy and lies. It also shows that true friends and lovers will always stand by each other and overcome any difficulties.</li>
144
- </ol>
145
- </p> 0a6ba089eb<br />
146
- <br />
147
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Mod Apk Drive Park and Customize 70 Vehicles with Everything Unlocked.md DELETED
@@ -1,88 +0,0 @@
1
-
2
- <h1>Car Parking Multiplayer Mod APK Unlocked Everything New Version</h1>
3
- <p>Do you love driving and parking games? Do you want to experience the thrill of realistic car physics, open world map, multiplayer mode, customization options, and different game modes? If yes, then you should try Car Parking Multiplayer, one of the most popular and realistic car parking simulation games for Android devices. And if you want to enjoy the game with unlimited money, gold, cars, items, and no ads, then you should download Car Parking Multiplayer Mod APK, the modified version of the game that gives you everything unlocked for free. In this article, we will tell you what is Car Parking Multiplayer, what are its features, why you should download Car Parking Multiplayer Mod APK, and how to download and install it on your device.</p>
4
- <h2>What is Car Parking Multiplayer?</h2>
5
- <p>Car Parking Multiplayer is a car parking simulation game developed by olzhass, a game studio that specializes in realistic driving and parking games. The game has more than 100 million downloads on Google Play Store and has an average rating of 4.3 out of 5 stars. The game lets you drive and park various types of cars, from sedans to sports cars, from trucks to buses, from classic cars to modern cars. You can choose from more than 150 different cars, each with its own characteristics and features. You can also customize and tune your cars according to your preferences, changing the color, wheels, suspension, engine, turbo, exhaust, and more.</p>
6
- <h2>car parking multiplayer mod apk unlocked everything new version</h2><br /><p><b><b>Download File</b> &#9913;&#9913;&#9913; <a href="https://urlin.us/2uSTgz">https://urlin.us/2uSTgz</a></b></p><br /><br />
7
- <h3>Features of Car Parking Multiplayer</h3>
8
- <p>Car Parking Multiplayer is not just a simple car parking game. It has many features that make it stand out from other similar games. Here are some of the features of Car Parking Multiplayer:</p>
9
- <h4>Realistic car physics and graphics</h4>
10
- <p>The game uses advanced car physics and graphics to create a realistic driving and parking experience. You can feel the weight, speed, acceleration, braking, steering, and suspension of each car. You can also see the details of each car model, such as the interior, exterior, lights, mirrors, doors, windows, etc. The game also has realistic sound effects for each car engine, horn, tire screech, collision, etc.</p>
11
- <h4>Open world map and multiplayer mode</h4>
12
- <p>The game has an open world map that you can explore freely. You can drive around the city streets, highways, parking lots, airports, deserts, forests, beaches, etc. You can also interact with other players in the multiplayer mode. You can chat with them using voice or text messages. You can also join or create your own online room with up to 100 players. You can race with them, challenge them to parking competitions, or just have fun together.</p>
13
- <h4>Customization and tuning options</h4>
14
- <p>The game gives you a lot of options to customize and tune your cars. You can change the color of your car using a color picker or choose from predefined colors. You can also change the wheels of your car using different rims and tires. You can also modify the suspension height, camber angle, wheel size, etc. You can also upgrade the engine performance of your car using turbo boosters, nitrous oxide injectors, etc. You can also change the exhaust sound of your car using different mufflers.</p>
15
- <h4>Different game modes and challenges</h4>
16
- <p>The game has different game modes and challenges that you can play according to your mood and skill level. You can play the classic parking mode where you have to park your car in a designated spot without hitting any obstacles or other cars. You can also play the free driving mode where you can drive around the map without any restrictions or objectives. You can also play the drift mode where you have to perform drifts and earn points. You can also play the police mode where you can chase or be chased by the police cars. You can also play the zombie mode where you have to survive the zombie apocalypse by driving and shooting zombies. You can also play the delivery mode where you have to deliver goods from one place to another. You can also play the taxi mode where you have to pick up and drop off passengers. You can also play the tow truck mode where you have to tow broken or illegally parked cars.</p>
17
- <h2>Why download Car Parking Multiplayer Mod APK?</h2>
18
- <p>Car Parking Multiplayer is a fun and addictive game, but it also has some limitations and drawbacks. For example, you need a lot of money and gold to buy, upgrade, and customize your cars. You also need to watch ads to get some rewards or skip some levels. You also need to root your device to access some features or items. These things can make the game less enjoyable and frustrating for some players. That's why you should download Car Parking Multiplayer Mod APK, the modified version of the game that gives you everything unlocked for free.</p>
19
- <h3>Benefits of Car Parking Multiplayer Mod APK</h3>
20
- <p>Car Parking Multiplayer Mod APK is a hacked version of the game that has many benefits over the original version. Here are some of the benefits of Car Parking Multiplayer Mod APK:</p>
21
- <h4>Unlimited money and gold</h4>
22
- <p>With Car Parking Multiplayer Mod APK, you don't have to worry about running out of money and gold. You will get unlimited money and gold in your account as soon as you start the game. You can use them to buy any car you want, upgrade it, customize it, and tune it. You can also use them to buy any item you need, such as fuel, repair kits, weapons, etc.</p>
23
- <p>car parking multiplayer mod apk latest version with all unlocked<br />
24
- download car parking multiplayer mod apk unlimited money and cars<br />
25
- car parking multiplayer mod apk free download for android no root<br />
26
- how to install car parking multiplayer mod apk with obb file<br />
27
- car parking multiplayer mod apk hack online generator<br />
28
- car parking multiplayer mod apk 2023 update new features<br />
29
- car parking multiplayer mod apk revdl rexdl android 1<br />
30
- car parking multiplayer mod apk offline mode enabled<br />
31
- car parking multiplayer mod apk unlimited gold and diamonds<br />
32
- car parking multiplayer mod apk premium vip membership<br />
33
- car parking multiplayer mod apk best settings for realistic graphics<br />
34
- car parking multiplayer mod apk custom maps and vehicles<br />
35
- car parking multiplayer mod apk cheats codes and tips<br />
36
- car parking multiplayer mod apk gameplay video review<br />
37
- car parking multiplayer mod apk download link in description<br />
38
- car parking multiplayer mod apk full version cracked<br />
39
- car parking multiplayer mod apk mega mod menu<br />
40
- car parking multiplayer mod apk no ads and surveys<br />
41
- car parking multiplayer mod apk original vs modified comparison<br />
42
- car parking multiplayer mod apk safe and secure download<br />
43
- car parking multiplayer mod apk unlimited fuel and nitro<br />
44
- car parking multiplayer mod apk new cars and skins added<br />
45
- car parking multiplayer mod apk fun and challenging missions<br />
46
- car parking multiplayer mod apk high quality sound effects and music<br />
47
- car parking multiplayer mod apk support all android devices<br />
48
- car parking multiplayer mod apk easy and fast installation process<br />
49
- car parking multiplayer mod apk unlock everything without verification<br />
50
- car parking multiplayer mod apk latest bug fixes and improvements<br />
51
- car parking multiplayer mod apk realistic physics and controls<br />
52
- car parking multiplayer mod apk social media integration and chat feature<br />
53
- car parking multiplayer mod apk low mb size and battery consumption<br />
54
- car parking multiplayer mod apk unlimited xp and level up fast<br />
55
- car parking multiplayer mod apk new modes and events added regularly<br />
56
- car parking multiplayer mod apk 3d hd graphics and animation<br />
57
- car parking multiplayer mod apk user friendly interface and design<br />
58
- car parking multiplayer mod apk unlimited coins and gems generator<br />
59
- car parking multiplayer mod apk pro version unlocked for free<br />
60
- car parking multiplayer mod apk anti ban and virus protection<br />
61
- car parking multiplayer mod apk online and offline gameplay options<br />
62
- car parking multiplayer mod apk compatible with all android versions</p>
63
- <h4>All cars and items unlocked</h4>
64
- <p>With Car Parking Multiplayer Mod APK, you don't have to wait or work hard to unlock all the cars and items in the game. You will get access to all the cars and items from the beginning of the game. You can choose from more than 150 different cars, each with its own features and characteristics. You can also use any item you want, such as turbo boosters, nitrous oxide injectors, mufflers, etc.</p>
65
- <h4>No ads and no root required</h4>
66
- <p>With Car Parking Multiplayer Mod APK, you don't have to watch annoying ads or root your device to enjoy the game. The mod apk file has no ads and no root requirement. You can play the game without any interruptions or risks. You can also update the game easily without losing your progress or data.</p>
67
- <h3>How to download and install Car Parking Multiplayer Mod APK?</h3>
68
- <p>Downloading and installing Car Parking Multiplayer Mod APK is very easy and simple. Just follow these steps:</p>
69
- <h4>Step 1: Download the mod apk file from a trusted source</h4>
70
- <p>The first step is to download the mod apk file from a trusted source. You can use this link to download the latest version of Car Parking Multiplayer Mod APK for free. The file size is about 300 MB and it is safe and virus-free.</p>
71
- <h4>Step 2: Enable unknown sources on your device settings</h4>
72
- <p>The second step is to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to your device settings > security > unknown sources > enable.</p>
73
- <h4>Step 3: Install the mod apk file and enjoy the game</h4>
74
- <p>The third step is to install the mod apk file and enjoy the game. To do this, locate the downloaded mod apk file on your device storage and tap on it. Follow the instructions on the screen and wait for the installation process to finish. Once done, open the game and start playing with everything unlocked for free.</p>
75
- <h2>Conclusion</h2>
76
- <p>Car Parking Multiplayer is one of the best car parking simulation games for Android devices. It has realistic car physics and graphics, open world map and multiplayer mode, customization and tuning options, different game modes and challenges, and more. However, if you want to enjoy the game with unlimited money, gold, cars, items, and no ads, then you should download Car Parking Multiplayer Mod APK, the modified version of the game that gives you everything unlocked for free. Just follow the steps above to download and install Car Parking Multiplayer Mod APK on your device and have fun.</p> <p>Here are some FAQs that you might have about Car Parking Multiplayer Mod APK:</p>
77
- <h4>Q: Is Car Parking Multiplayer Mod APK safe to use?</h4>
78
- <p>A: Yes, Car Parking Multiplayer Mod APK is safe to use. It has no viruses, malware, or spyware. It also does not require root access or any permissions that might compromise your device security or privacy.</p>
79
- <h4>Q: Is Car Parking Multiplayer Mod APK compatible with my device?</h4>
80
- <p>A: Car Parking Multiplayer Mod APK is compatible with most Android devices that have Android 4.4 or higher. However, some devices might have compatibility issues due to different hardware or software specifications. If you encounter any problems while playing the game, you can contact the developer for support or try another device.</p>
81
- <h4>Q: Can I play Car Parking Multiplayer Mod APK online with other players?</h4>
82
- <p>A: Yes, you can play Car Parking Multiplayer Mod APK online with other players. The game has a multiplayer mode that allows you to join or create online rooms with up to 100 players. You can chat with them, race with them, challenge them, or just have fun together.</p>
83
- <h4>Q: Can I update Car Parking Multiplayer Mod APK to the latest version?</h4>
84
- <p>A: Yes, you can update Car Parking Multiplayer Mod APK to the latest version. However, you need to download and install the new mod apk file from the same source that you used before. You also need to uninstall the previous version of the game before installing the new one. You can also backup your game data before updating to avoid losing your progress or data.</p>
85
- <h4>Q: Can I request a new feature or report a bug for Car Parking Multiplayer Mod APK?</h4>
86
- <p>A: Yes, you can request a new feature or report a bug for Car Parking Multiplayer Mod APK. You can contact the developer through their email address or social media accounts. You can also leave a comment or feedback on the source website where you downloaded the mod apk file. The developer will try to respond to your request or fix the bug as soon as possible.</p> 197e85843d<br />
87
- <br />
88
- <br />
spaces/1phancelerku/anime-remove-background/Download queue xbox series x How to see and prioritize your game and app installations.md DELETED
@@ -1,170 +0,0 @@
1
-
2
- <h1>How to Manage Your Download Queue on Xbox Series X</h1>
3
- <p>If you have an Xbox Series X, you probably want to download and play a lot of games and apps on your console. But sometimes, you may encounter some issues with your downloads, such as slow speed, errors, or interruptions. That's why it's important to know how to manage your download queue on Xbox Series X.</p>
4
- <p>The download queue is where you can see the status of your game and app installations, as well as pause, cancel, or prioritize them. You can also launch a downloaded game or app from the queue or from a notification on your screen.</p>
5
- <h2>download queue xbox series x</h2><br /><p><b><b>Download</b> &#127383; <a href="https://jinyurl.com/2uNO1d">https://jinyurl.com/2uNO1d</a></b></p><br /><br />
6
- <p>In this article, we will show you how to view and manage your download queue on Xbox Series X, as well as how to troubleshoot some common problems with your game and app installations. We will also give you some tips on how to optimize your network bandwidth for game downloads.</p>
7
- <h2>How to View the Status of Your Game and App Installations</h2>
8
- <p>To access your download queue on Xbox Series X, follow these steps:</p>
9
- <ol>
10
- <li>Press the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller to open the guide.</li>
11
- <li>Select My games & apps > See all.</li>
12
- <li>Select Manage > Queue.</li>
13
- </ol>
14
- <p>Here, you can see the active downloads with a progress bar along the bottom of the game or app's tile. You can also see the estimated time remaining, the file size, and the download speed.</p>
15
- <h3>How to Pause, Cancel, or Prioritize Your Downloads</h3>
16
- <p>From the queue, you can also control your downloads using your controller and the on-screen prompts. Here are some options:</p>
17
- <ul>
18
- <li>Select Pause all to temporarily halt all your current installations.</li>
19
- <li>Select Cancel all to remove all items from your queue altogether.</li>
20
- <li>Select one of the items in your queue to pause or cancel that individual game or app.</li>
21
- <li>Select Resume all or Resume installation if you want to resume your paused downloads.</li>
22
- <li <li>Select Move to front to make a game or app the highest priority in your queue.</li>
23
- </ul>
24
- <p>You can also use the Sort by option to arrange your queue by name, size, or date added.</p>
25
- <h3>How to Launch a Downloaded Game or App</h3>
26
- <p>Once a game or app is fully downloaded and installed, you can launch it from the queue by selecting Play. You can also launch it from a notification that pops up on your screen when the installation is complete.</p>
27
- <p>How to check download progress on Xbox Series X/S<br />
28
- Xbox Series X download queue not working<br />
29
- Xbox Series X download queue stuck<br />
30
- How to manage download queue on Xbox Series X<br />
31
- Xbox Series X download queue slow<br />
32
- How to cancel download queue on Xbox Series X<br />
33
- Xbox Series X download queue error<br />
34
- How to pause download queue on Xbox Series X<br />
35
- Xbox Series X download queue limit<br />
36
- How to prioritize download queue on Xbox Series X<br />
37
- Xbox Series X download queue full<br />
38
- How to clear download queue on Xbox Series X<br />
39
- Xbox Series X download queue issues<br />
40
- How to resume download queue on Xbox Series X<br />
41
- Xbox Series X download queue missing<br />
42
- How to view download queue on Xbox Series X<br />
43
- Xbox Series X download queue not showing<br />
44
- How to speed up download queue on Xbox Series X<br />
45
- Xbox Series X download queue order<br />
46
- How to change download queue on Xbox Series X<br />
47
- Xbox Series X download queue problem<br />
48
- How to fix download queue on Xbox Series X<br />
49
- Xbox Series X download queue size<br />
50
- How to delete download queue on Xbox Series X<br />
51
- Xbox Series X download queue update<br />
52
- How to access download queue on Xbox Series X<br />
53
- Xbox Series X download queue won't start<br />
54
- How to move download queue on Xbox Series X<br />
55
- Xbox Series X download queue location<br />
56
- How to stop download queue on Xbox Series X<br />
57
- Xbox Series X download queue offline<br />
58
- How to restart download queue on Xbox Series X<br />
59
- Xbox Series X download queue notification<br />
60
- How to transfer download queue on Xbox Series X<br />
61
- Xbox Series X download queue online<br />
62
- How to install download queue on Xbox Series X<br />
63
- Xbox Series X download queue hidden<br />
64
- How to find download queue on Xbox Series X<br />
65
- Xbox Series X download queue settings<br />
66
- How to edit download queue on Xbox Series X<br />
67
- Xbox Series X download queue tips<br />
68
- How to optimize download queue on Xbox Series X<br />
69
- Xbox Series X download queue guide<br />
70
- How to troubleshoot download queue on Xbox Series X<br />
71
- Xbox Series X download queue tricks</p>
72
- <p>Alternatively, you can use the guide to launch a downloaded game or app. Just press the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller and select My games & apps > See all. Then, select the game or app you want to play from the list.</p>
73
- <h2>How to Troubleshoot Your Game and App Installations</h2>
74
- <p>Sometimes, you may encounter some issues with your game and app installations, such as slow speed, errors, or interruptions. Here are some common causes and solutions for these problems:</p>
75
- <h3>How to Check Your Internet Connection</h3>
76
- <p>Your internet connection is one of the most important factors that affect your download speed and performance. If your connection is weak, unstable, or slow, your downloads may take longer or fail.</p>
77
- <p>To check your internet connection on Xbox Series X, follow these steps:</p>
78
- <ol>
79
- <li>Press the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller to open the guide.</li>
80
- <li>Select Profile & system > Settings > General > Network settings.</li>
81
- <li>Select Test network connection to see if you are connected to the internet.</li>
82
- <li>Select Test network speed & statistics to see your download speed, upload speed, and latency.</li>
83
- </ol>
84
- <p>If you see any errors or warnings, follow the instructions on the screen to fix them. You may need to reboot your router, move closer to it, or use a wired connection instead of wireless.</p> <h3>How to Check Your Account and Storage Space</h3>
85
- <p>Another factor that may affect your game and app installations is your account and storage space. You need to make sure that you are signed in to the correct account and that you have enough storage space on your console or external drive for your downloads.</p>
86
- <p>To check your account and storage space on Xbox Series X, follow these steps:</p>
87
- <ol>
88
- <li>Press the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller to open the guide.</li>
89
- <li>Select Profile & system > Add or switch to see which account you are using. If you want to switch to a different account, select it from the list or add a new one.</li>
90
- <li>Select My games & apps > See all > Manage > Storage devices to see how much storage space you have left on your console or external drive. If you need more space, you can delete some games or apps that you don't use, move them to another device, or add a new device.</li>
91
- </ol>
92
- <h3>How to Restart Your Console</h3>
93
- <p>Sometimes, a simple restart can fix some issues with your game and app installations. Restarting your console can clear the cache, refresh the system, and resume your downloads.</p>
94
- <p>To restart your console on Xbox Series X, follow these steps:</p>
95
- <ol>
96
- <li>Press and hold the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller until the power menu appears.</li>
97
- <li>Select Restart console > Restart.</li>
98
- <li>Wait for your console to turn off and on again.</li>
99
- <li>Check your download queue to see if your downloads are resumed or completed.</li>
100
- </ol>
101
- <h3>How to Contact Xbox Support</h3>
102
- <p>If none of the above solutions work, you may need to contact Xbox support for further assistance. Xbox support can help you with troubleshooting, error codes, refunds, warranties, and more.</p>
103
- <p>To contact Xbox support on Xbox Series X, follow these steps:</p>
104
- <ol>
105
- <li>Press the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller to open the guide.</li>
106
- <li>Select Profile & system > Settings > System > Console info.</li>
107
- <li>Note down your serial number and OS version.</li>
108
- <li>Go to https://support.xbox.com/en-US/contact-us on your phone or computer.</li>
109
- <li>Select Xbox Series X|S as your device and choose a topic and issue that matches your problem.</li>
110
- <li>Follow the instructions on the screen to get help from a live agent, a chatbot, a community forum, or a self-help article.</li>
111
- </ol> <h2>How to Manage Your Network Bandwidth for Game Downloads</h2>
112
- <p>Another way to improve your download speed and performance is to manage your network bandwidth for game downloads. Network bandwidth is the amount of data that can be transferred over your internet connection at a given time. The more bandwidth you use, the faster your downloads will be, but also the more likely you will experience lag or buffering when streaming or playing online games.</p>
113
- <p>To manage your network bandwidth for game downloads on Xbox Series X, you can adjust your network settings to enable or disable automatic updates, set a download limit or schedule, or use a wired or wireless connection.</p>
114
- <h3>How to Enable or Disable Automatic Updates</h3>
115
- <p>Automatic updates are a feature that allows your console to keep your games and apps up to date without you having to manually check for updates. This can be convenient, but also consume a lot of bandwidth and slow down your other downloads.</p>
116
- <p>To enable or disable automatic updates on Xbox Series X, follow these steps:</p>
117
- <ol>
118
- <li>Press the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller to open the guide.</li>
119
- <li>Select Profile & system > Settings > System > Updates & downloads.</li>
120
- <li>Select Keep my games & apps up to date to toggle the feature on or off.</li>
121
- </ol>
122
- <p>If you disable automatic updates, you will need to manually check for updates for your games and apps by going to My games & apps > See all > Manage > Updates.</p>
123
- <h3>How to Set a Download Limit or Schedule</h3>
124
- <p>A download limit or schedule is a feature that allows you to limit the amount of bandwidth used for downloads or schedule them for off-peak hours. This can help you avoid using up your data cap, reduce congestion on your network, and avoid interfering with your online gaming or streaming activities.</p>
125
- <p>To set a download limit or schedule on Xbox Series X, follow these steps:</p>
126
- <ol>
127
- <li>Press the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller to open the guide.</li>
128
- <li>Select Profile & system > Settings > System > Updates & downloads.</li>
129
- <li>Select Download settings > Limit how much data I use for game and app downloads.</li>
130
- <li>Select one of the options: Don't limit, Limit 2 GB per hour, Limit 5 GB per hour, Limit 10 GB per hour, Limit 25 GB per hour, or Limit 50 GB per hour.</li>
131
- <li>Select Download settings > Schedule when I download game and app updates.</li>
132
- <li>Select one of the options: Any time, During off-peak hours only, or During off-peak hours and when I'm not playing games.</li>
133
- </ol>
134
- <p>You can also customize your off-peak hours by selecting Download settings > Change my off-peak hours and choosing a start and end time.</p> <h3>How to Use a Wired or Wireless Connection</h3>
135
- <p>The type of connection you use for your console can also affect your download speed and performance. A wired connection is usually faster, more stable, and more secure than a wireless connection, but it requires a cable and a port on your router. A wireless connection is more convenient and flexible, but it can be affected by interference, distance, and other devices on your network.</p>
136
- <p>To use a wired or wireless connection on Xbox Series X, follow these steps:</p>
137
- <ol>
138
- <li>Press the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller to open the guide.</li>
139
- <li>Select Profile & system > Settings > General > Network settings.</li>
140
- <li>Select Set up wireless network to connect to a Wi-Fi network. You will need to enter your network name and password.</li>
141
- <li>Select Advanced settings > Alternate MAC address to enter a MAC address for your console if your router requires it.</li>
142
- <li>Select Advanced settings > IP settings to configure your IP address, subnet mask, gateway, and DNS servers if your network requires it.</li>
143
- <li>Or, plug an Ethernet cable into the port on the back of your console and the port on your router. Your console will automatically detect the wired connection.</li>
144
- </ol>
145
- <h2>Conclusion</h2>
146
- <p>Managing your download queue on Xbox Series X can help you enjoy your games and apps without any hassle. You can view and control your downloads, troubleshoot any issues, and optimize your network bandwidth for the best experience. Here are some tips and recommendations to remember:</p>
147
- <ul>
148
- <li>Check your download queue regularly to see the status of your installations and updates.</li>
149
- <li>Pause or cancel any downloads that you don't need or want to prioritize others.</li>
150
- <li>Launch a downloaded game or app from the queue, the notification, or the guide.</li>
151
- <li>Check your internet connection, account, storage space, and console if you encounter any problems with your downloads.</li>
152
- <li>Contact Xbox support if none of the solutions work for you.</li>
153
- <li>Enable or disable automatic updates according to your preference.</li>
154
- <li>Set a download limit or schedule to save bandwidth and avoid interference with other activities.</li>
155
- <li>Use a wired or wireless connection depending on your situation and needs.</li>
156
- </ul>
157
- <h2>FAQs</h2>
158
- <h3>Q: How do I download games and apps on Xbox Series X?</h3>
159
- <p>A: You can download games and apps on Xbox Series X from the Microsoft Store. To access the store, press the Xbox button <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4c/Xbox_button.svg/1200px-Xbox_button.svg.png" alt="Xbox button" width="20" height="20"> on your controller and select Store. Then, browse or search for the game or app you want and select Get or Buy. The game or app will be added to your download queue automatically.</p>
160
- <h3>Q: How do I check for updates for my games and apps on Xbox Series X?</h3>
161
- <p>A: You can check for updates for your games and apps on Xbox Series X by going to My games & apps > See all > Manage > Updates. Here, you can see which games and apps have available updates and select them to download them. You can also enable automatic updates to keep your games and apps up to date without checking manually.</p>
162
- <h3>Q: How do I delete games and apps on Xbox Series X?</h3>
163
- <p>A: You can delete games and apps on Xbox Series X by going to My games & apps > See all > Manage > Storage devices. Here, you can see how much space each game or app takes up on your console or external drive. Select one of them and choose Uninstall to delete it. You can also select Uninstall all to delete all games and apps on that device.</p>
164
- <h3>Q: How do I move games and apps between devices on Xbox Series X?</h3>
165
- <p>A: You can move games and apps between devices on Xbox Series X by going to My games & apps > See all > Manage > Storage devices. Here, you can see which games and apps are stored on your console or external drive. Select one of them and choose Move to move it to another device. You can also select Move all to move all games and apps from one device to another.</p>
166
- <h3>Q: How do I play games from an external drive on Xbox Series X?</h3>
167
- <p>A: You can play games from an external drive on Xbox Series X by plugging the drive into one of the USB ports on the back or front of your console. Your console will detect the drive and show you the games and apps that are stored on it. You can launch them from the guide or from My games & apps. You can also move them to your console or another external drive if you want.</p>
168
- <p>Note that some games may require an update or optimization to run on Xbox Series X. You can check for these by going to My games & apps > See all > Manage > Updates > Optimized for Xbox Series X|S.</p> 197e85843d<br />
169
- <br />
170
- <br />
spaces/1phancelerku/anime-remove-background/Explore the possibilities of melon playground a sandbox game for iOS and Android that is easy to play and hard to put down.md DELETED
@@ -1,91 +0,0 @@
- <h1>Melon Playground: A Fun and Creative Sandbox Game for iOS Devices</h1>
- <p>Do you like sandbox games where you can create your own scenarios and experiment with different items? Do you have an iOS device and want to find a fun and creative app to play with? If you answered yes to both questions, then you should check out Melon Playground, a simple but amazing sandbox game that will keep you entertained for hours.</p>
- <h2>What is Melon Playground?</h2>
- <h3>A simple sandbox game where you create your own scenarios</h3>
- <p>Melon Playground is an indie game created by a developer named sliz. It is available on Android and iOS devices. The game is very simple: you have a scene where you can drag and drop various items from an inventory. You can then interact with these items in different ways, such as throwing them, shooting them, exploding them, or combining them. You can also change the gravity, time, weather, and other settings of the scene. The game has no rules or objectives; you are free to create whatever you want.</p>
- <h2>melon playground apk apple</h2><br /><p><b><b>Download File</b> &#127383; <a href="https://jinyurl.com/2uNP6j">https://jinyurl.com/2uNP6j</a></b></p><br /><br />
- <h3>A wide variety of items at your disposal: melee weapons, guns, barrels, and more</h3>
- <p>The game offers a wide variety of items that you can use in your scenarios. You can find melee weapons such as swords, axes, hammers, knives, and bats; guns such as pistols, rifles, shotguns, snipers, and rocket launchers; barrels such as oil drums, gas tanks, water barrels, and explosive barrels; and other items such as cars, bikes, planes, helicopters, boats, animals, humans, zombies, aliens, robots, ragdolls, furniture, plants, food, drinks, balls, balloons, fireworks, and more. You can also customize the color and size of some items.</p>
- <h3>A free app with in-app purchases and a no-ads subscription option</h3>
- <p>The game is free to download from the App Store and does not require an internet connection to play. However, some items are locked behind in-app purchases that range from $0.99 to $4.99. You can also buy a no-ads subscription for $3.49 per week that removes all ads from the game. The game does not have any pop-up ads or banners; only video ads that play when you load or save a scenario or when you exit the app.</p>
- <h2>How to play Melon Playground?</h2>
- <h3>Download the app from the App Store and launch it on your device</h3>
- <h3>Choose a map from the menu or create your own custom map</h3>
- <p>Once you launch the app, you will see a menu where you can choose a map to play on. There are 12 maps available in the game, each with a different theme and environment. You can choose from city, desert, forest, island, moon, ocean, playground, snow, space, swamp, volcano, and wasteland. You can also create your own custom map by selecting the blank map option and changing the terrain, skybox, and lighting.</p>
- <p>melon playground app store download<br />
- melon playground ios sandbox game<br />
- melon playground apk for iphone<br />
- melon playground no ads subscription<br />
- melon playground file manager mod<br />
- melon playground indie game sliz<br />
- melon playground melee weapons guns<br />
- melon playground create your own scenarios<br />
- melon playground app privacy policy<br />
- melon playground app support license<br />
- melon playground twitter updates<br />
- melon playground android and ios<br />
- melon playground mobile application<br />
- melon playground 17+ age rating<br />
- melon playground free offers in-app purchases<br />
- melon playground 4.5 stars ratings<br />
- melon playground 333.1 mb size<br />
- melon playground payge ltd developer<br />
- melon playground macos 11.0 or later<br />
- melon playground ios 12.0 or later<br />
- melon playground barrels horror themes<br />
- melon playground version 15.1.1 update<br />
- melon playground stability improvement bugs fixes<br />
- melon playground net energy gain experiment<br />
- melon playground holy grail fusion mini sun<br />
- melon playground korea superconducting tokamak advanced research facility<br />
- melon playground nuclear fusion reaction 100 million degrees celsius<br />
- melon playground seven times hotter than sun core<br />
- melon playground sun core temperature kelvin<br />
- melon playground new scientist article<br />
- melon playground the sun news article<br />
- melon playground yahoo news article<br />
- melon playground wikipedia solar core page<br />
- melon playground cornell university solar layers page<br />
- melon playground nasa sun fact sheet page<br />
- melon playground montana solar physics core page<br />
- melon playground identifiers usage data tracking<br />
- melon playground purchases location user content linked data<br />
- melon playground diagnostics data not linked to identity<br />
- melon playground frequent intense cartoon fantasy violence</p>
- <h3>Drag and drop items from the inventory to the scene and interact with them</h3>
- <p>After you choose a map, you will enter the scene where you can start creating your scenario. You can access the inventory by tapping on the backpack icon on the top right corner of the screen. You can scroll through the categories and items by swiping left or right on the inventory. To place an item on the scene, simply drag and drop it from the inventory. You can also tap on an item to see its name and description.</p>
- <p>To interact with an item, you can tap on it or use the buttons on the bottom of the screen. You can throw an item by tapping on it and swiping in any direction. You can shoot an item by tapping on it and tapping on the target icon. You can explode an item by tapping on it and tapping on the bomb icon. You can combine two items by dragging one item over another item and tapping on the plus icon.</p>
- <h3>Use the buttons on the screen to move, rotate, scale, clone, delete, or freeze items</h3>
- <p>You can also modify the items on the scene by using the buttons on the left side of the screen. You can move an item by tapping on it and dragging it with your finger. You can rotate an item by tapping on it and using two fingers to twist it. You can scale an item by tapping on it and using two fingers to pinch or spread it. You can clone an item by tapping on it and tapping on the clone icon. You can delete an item by tapping on it and tapping on the trash icon. You can freeze an item by tapping on it and tapping on the freeze icon.</p>
- <h3>Save and load your scenarios or share them with other players online</h3>
- <p>You can save your scenarios by tapping on the save icon on the top left corner of the screen. You can name your scenario and choose a thumbnail for it. You can load your scenarios by tapping on the load icon next to the save icon. You can also share your scenarios with other players online by tapping on the share icon next to the load icon. You can upload your scenario to a server where other players can download it and rate it.</p>
- <h2>Why should you play Melon Playground?</h2>
- <h3>It stimulates your imagination and creativity</h3>
- <p>Melon Playground is a game that lets you unleash your imagination and creativity. You can create any scenario you want with any items you want. You can make funny, scary, action-packed, or relaxing scenarios. You can make stories, jokes, experiments, or challenges. You can make anything you can think of with Melon Playground.</p>
- <h3>It offers endless possibilities and fun</h3>
- <p>Melon Playground is a game that offers endless possibilities and fun. You can play with different items and see how they react with each other. You can change the settings of the scene and see how they affect your scenario. You can explore different maps and discover new things. You can also download other players' scenarios and see what they have created.</p>
- <h3>It has positive reviews and ratings from users and critics</h3>
- <h3>It is updated regularly with new features and improvements</h3>
- <p>Melon Playground is a game that is updated regularly with new features and improvements. The developer listens to the feedback and suggestions from the users and implements them in the game. The game has received several updates since its launch in 2020, adding new items, maps, settings, modes, and bug fixes. The developer also plans to add more content and features in the future, such as multiplayer mode, online chat, voice chat, VR support, and more.</p>
- <h2>Conclusion</h2>
- <h3>Melon Playground is a great sandbox game for iOS devices that lets you create your own scenarios with various items</h3>
- <p>In conclusion, Melon Playground is a great sandbox game for iOS devices that lets you create your own scenarios with various items. You can use melee weapons, guns, barrels, cars, planes, animals, humans, zombies, aliens, robots, ragdolls, furniture, plants, food, drinks, balls, balloons, fireworks, and more to make your scenarios. You can also change the gravity, time, weather, and other settings of the scene to make it more interesting.</p>
- <h3>It is easy to play, free to download, and fun to explore</h3>
- <p>Melon Playground is easy to play, free to download, and fun to explore. You can download it from the App Store and play it offline or online. You can drag and drop items from the inventory to the scene and interact with them using simple gestures and buttons. You can also save and load your scenarios or share them with other players online.</p>
- <h3>It is a popular and well-made app that deserves your attention and support</h3>
- <p>Melon Playground is a popular and well-made app that deserves your attention and support. It has positive reviews and ratings from users and critics. It has been featured in several media outlets such as AppAdvice, Pocket Gamer, TouchArcade, iMore, 148Apps, AppSpy, Slide to Play, Gamezebo, Cult of Mac, Macworld, The Guardian, Mashable, TechCrunch, Wired, The Verge, Polygon, Kotaku, IGN, GameSpot, PC Gamer, Forbes, Business Insider, CNN, BBC News, The New York Times, The Wall Street Journal, TIME Magazine, and more. It is also updated regularly with new features and improvements by the developer.</p>
- <p>If you are looking for a fun and creative sandbox game for your iOS device, you should definitely give Melon Playground a try. You will not regret it.</p>
- <h2>FAQs</h2>
- <h4>What are the minimum requirements to play Melon Playground on iOS devices?</h4>
- <p>To play Melon Playground on iOS devices, you need iOS 12.0 or later or iPadOS 12.0 or later. You also need at least 300 MB of free storage space on your device.</p>
- <h4>How can I contact the developer of Melon Playground?</h4>
- <p>You can contact the developer of Melon Playground by sending an email to [email protected] or by following him on Twitter @sliz_games.</p>
- <h4>How can I support the development of Melon Playground?</h4>
- <p>You can support the development of Melon Playground by buying in-app purchases or subscribing to no ads in the game. You can also rate and review the game on the App Store or share it with your friends.</p>
- <h4>How can I report a bug or a problem in Melon Playground?</h4>
- <p>You can report a bug or a problem in Melon Playground by sending an email to [email protected] or by leaving a comment on the game's page on the App Store.</p>
- <h4>How can I suggest a new feature or an improvement for Melon Playground?</h4>
- <p>You can suggest a new feature or an improvement for Melon Playground by sending an email to [email protected] or by leaving a comment on the game's page on the App Store.</p>
 
spaces/4Taps/SadTalker/src/audio2pose_models/discriminator.py DELETED
@@ -1,76 +0,0 @@
- import torch
- import torch.nn.functional as F
- from torch import nn
-
- class ConvNormRelu(nn.Module):
-     def __init__(self, conv_type='1d', in_channels=3, out_channels=64, downsample=False,
-                  kernel_size=None, stride=None, padding=None, norm='BN', leaky=False):
-         super().__init__()
-         if kernel_size is None:
-             if downsample:
-                 kernel_size, stride, padding = 4, 2, 1
-             else:
-                 kernel_size, stride, padding = 3, 1, 1
-
-         if conv_type == '2d':
-             self.conv = nn.Conv2d(
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride,
-                 padding,
-                 bias=False,
-             )
-             if norm == 'BN':
-                 self.norm = nn.BatchNorm2d(out_channels)
-             elif norm == 'IN':
-                 self.norm = nn.InstanceNorm2d(out_channels)
-             else:
-                 raise NotImplementedError
-         elif conv_type == '1d':
-             self.conv = nn.Conv1d(
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride,
-                 padding,
-                 bias=False,
-             )
-             if norm == 'BN':
-                 self.norm = nn.BatchNorm1d(out_channels)
-             elif norm == 'IN':
-                 self.norm = nn.InstanceNorm1d(out_channels)
-             else:
-                 raise NotImplementedError
-         nn.init.kaiming_normal_(self.conv.weight)
-
-         self.act = nn.LeakyReLU(negative_slope=0.2, inplace=False) if leaky else nn.ReLU(inplace=True)
-
-     def forward(self, x):
-         x = self.conv(x)
-         if isinstance(self.norm, nn.InstanceNorm1d):
-             x = self.norm(x.permute((0, 2, 1))).permute((0, 2, 1))  # normalize on [C]
-         else:
-             x = self.norm(x)
-         x = self.act(x)
-         return x
-
-
- class PoseSequenceDiscriminator(nn.Module):
-     def __init__(self, cfg):
-         super().__init__()
-         self.cfg = cfg
-         leaky = self.cfg.MODEL.DISCRIMINATOR.LEAKY_RELU
-
-         self.seq = nn.Sequential(
-             ConvNormRelu('1d', cfg.MODEL.DISCRIMINATOR.INPUT_CHANNELS, 256, downsample=True, leaky=leaky),  # B, 256, 64
-             ConvNormRelu('1d', 256, 512, downsample=True, leaky=leaky),  # B, 512, 32
-             ConvNormRelu('1d', 512, 1024, kernel_size=3, stride=1, padding=1, leaky=leaky),  # B, 1024, 16
-             nn.Conv1d(1024, 1, kernel_size=3, stride=1, padding=1, bias=True)  # B, 1, 16
-         )
-
-     def forward(self, x):
-         x = x.reshape(x.size(0), x.size(1), -1).transpose(1, 2)
-         x = self.seq(x)
-         x = x.squeeze(1)
-         return x
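
For reference, a minimal sketch of how this discriminator could be driven. The config object below is hypothetical: it only mirrors the two fields the class actually reads (MODEL.DISCRIMINATOR.LEAKY_RELU and MODEL.DISCRIMINATOR.INPUT_CHANNELS), not SadTalker's real settings.

from types import SimpleNamespace
import torch

# Hypothetical stand-in for the project's config object.
cfg = SimpleNamespace(MODEL=SimpleNamespace(
    DISCRIMINATOR=SimpleNamespace(LEAKY_RELU=True, INPUT_CHANNELS=6)))

disc = PoseSequenceDiscriminator(cfg)
poses = torch.randn(2, 64, 6)   # (batch, frames, pose dims); forward flattens the trailing dims to channels
scores = disc(poses)            # the two downsample=True convs halve 64 frames twice -> shape (2, 16)

Each of the 16 outputs scores a 4-frame patch of the input sequence, which is the usual patch-style discriminator design.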
 
spaces/801artistry/RVC801/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: RVC Inference HF
- emoji: 👀
- colorFrom: green
- colorTo: green
- sdk: gradio
- sdk_version: 3.43.2
- app_file: app.py
- pinned: false
- ---
 
spaces/801artistry/RVC801/diffq/base.py DELETED
@@ -1,262 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- from dataclasses import dataclass
- from concurrent import futures
- from fnmatch import fnmatch
- from functools import partial
- import io
- import math
- from multiprocessing import cpu_count
- import typing as tp
- import zlib
-
- import torch
-
-
- class BaseQuantizer:
-     @dataclass
-     class _QuantizedParam:
-         name: str
-         param: torch.nn.Parameter
-         module: torch.nn.Module
-         # If a Parameter is used multiple times, `other` can be used
-         # to share state between the different Quantizers
-         other: tp.Optional[tp.Any]
-
-     def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False,
-                  exclude: tp.Optional[tp.List[str]] = [], detect_bound: bool = True):
-         self.model = model
-         self.min_size = min_size
-         self.float16 = float16
-         self.exclude = exclude
-         self.detect_bound = detect_bound
-         self._quantized = False
-         self._pre_handle = self.model.register_forward_pre_hook(self._forward_pre_hook)
-         self._post_handle = self.model.register_forward_hook(self._forward_hook)
-
-         self._quantized_state = None
-         self._qparams = []
-         self._float16 = []
-         self._others = []
-         self._rnns = []
-
-         self._saved = []
-
-         self._find_params()
-
-     def _find_params(self):
-         min_params = self.min_size * 2**20 // 4
-         previous = {}
-         for module_name, module in self.model.named_modules():
-             if isinstance(module, torch.nn.RNNBase):
-                 self._rnns.append(module)
-             for name, param in list(module.named_parameters(recurse=False)):
-                 full_name = f"{module_name}.{name}"
-                 matched = False
-                 for pattern in self.exclude:
-                     if fnmatch(full_name, pattern) or fnmatch(name, pattern):
-                         matched = True
-                         break
-
-                 if param.numel() <= min_params or matched:
-                     if id(param) in previous:
-                         continue
-                     if self.detect_bound:
-                         previous[id(param)] = None
-                     if self.float16:
-                         self._float16.append(param)
-                     else:
-                         self._others.append(param)
-                 else:
-                     qparam = self._register_param(name, param, module, previous.get(id(param)))
-                     if self.detect_bound:
-                         previous[id(param)] = qparam
-                     self._qparams.append(qparam)
-
-     def _register_param(self, name, param, module, other):
-         return self.__class__._QuantizedParam(name, param, module, other)
-
-     def _forward_pre_hook(self, module, input):
-         if self.model.training:
-             self._quantized_state = None
-             if self._quantized:
-                 self.unquantize()
-             if self._pre_forward_train():
-                 self._fix_rnns()
-         else:
-             self.quantize()
-
-     def _forward_hook(self, module, input, output):
-         if self.model.training:
-             if self._post_forward_train():
-                 self._fix_rnns(flatten=False)  # Hacky, next forward will flatten
-
-     def quantize(self, save=True):
-         """
-         Immediately apply quantization to the model parameters.
-         If `save` is True, save a copy of the unquantized parameters, that can be
-         restored with `unquantize()`.
-         """
-         if self._quantized:
-             return
-         if save:
-             self._saved = [qp.param.data.to('cpu', copy=True)
-                            for qp in self._qparams if qp.other is None]
-         self.restore_quantized_state(self.get_quantized_state())
-         self._quantized = True
-         self._fix_rnns()
-
-     def unquantize(self):
-         """
-         Revert a previous call to `quantize()`.
-         """
-         if not self._quantized:
-             raise RuntimeError("Can only be called on a quantized model.")
-         if not self._saved:
-             raise RuntimeError("Nothing to restore.")
-         for qparam in self._qparams:
-             if qparam.other is None:
-                 qparam.param.data[:] = self._saved.pop(0)
-         assert len(self._saved) == 0
-         self._quantized = False
-         self._fix_rnns()
-
-     def _pre_forward_train(self) -> bool:
-         """
-         Called once before each forward for continuous quantization.
-         Should return True if parameters were changed.
-         """
-         return False
-
-     def _post_forward_train(self) -> bool:
-         """
-         Called once after each forward (to restore state for instance).
-         Should return True if parameters were changed.
-         """
-         return False
-
-     def _fix_rnns(self, flatten=True):
-         """
-         To be called after quantization happened to fix RNNs.
-         """
-         for rnn in self._rnns:
-             rnn._flat_weights = [
-                 (lambda wn: getattr(rnn, wn) if hasattr(rnn, wn) else None)(wn)
-                 for wn in rnn._flat_weights_names]
-             if flatten:
-                 rnn.flatten_parameters()
-
-     def get_quantized_state(self):
-         """
-         Returns sufficient quantized information to rebuild the model state.
-
-         ..Note::
-             To achieve maximum compression, you should compress this with
-             gzip or other, as quantized weights are not optimally coded!
-         """
-         if self._quantized_state is None:
-             self._quantized_state = self._get_quantized_state()
-         return self._quantized_state
-
-     def _get_quantized_state(self):
-         """
-         Actual implementation for `get_quantized_state`.
-         """
-         float16_params = []
-         for p in self._float16:
-             q = p.data.half()
-             float16_params.append(q)
-
-         return {
-             "quantized": [self._quantize_param(qparam) for qparam in self._qparams
-                           if qparam.other is None],
-             "float16": float16_params,
-             "others": [p.data.clone() for p in self._others],
-         }
-
-     def _quantize_param(self, qparam: _QuantizedParam) -> tp.Any:
-         """
-         To be overridden.
-         """
-         raise NotImplementedError()
-
-     def _unquantize_param(self, qparam: _QuantizedParam, quantized: tp.Any) -> torch.Tensor:
-         """
-         To be overridden.
-         """
-         raise NotImplementedError()
-
-     def restore_quantized_state(self, state) -> None:
-         """
-         Restore the state of the model from the quantized state.
-         """
-         for p, q in zip(self._float16, state["float16"]):
-             p.data[:] = q.to(p)
-
-         for p, q in zip(self._others, state["others"]):
-             p.data[:] = q
-
-         remaining = list(state["quantized"])
-         for qparam in self._qparams:
-             if qparam.other is not None:
-                 # Only unquantize first appearance of nn.Parameter.
-                 continue
-             quantized = remaining.pop(0)
-             qparam.param.data[:] = self._unquantize_param(qparam, quantized)
-         self._fix_rnns()
-
-     def detach(self) -> None:
-         """
-         Detach from the model, removes hooks and anything else.
-         """
-         self._pre_handle.remove()
-         self._post_handle.remove()
-
-     def model_size(self) -> torch.Tensor:
-         """
-         Returns an estimate of the quantized model size.
-         """
-         total = torch.tensor(0.)
-         for p in self._float16:
-             total += 16 * p.numel()
-         for p in self._others:
-             total += 32 * p.numel()
-         return total / 2**20 / 8  # bits to MegaBytes
-
-     def true_model_size(self) -> float:
-         """
-         Return the true quantized model size, in MB, without extra
-         compression.
-         """
-         return self.model_size().item()
-
-     def compressed_model_size(self, compress_level=-1, num_workers=8) -> float:
-         """
-         Return the compressed quantized model size, in MB.
-
-         Args:
-             compress_level (int): compression level used with zlib,
-                 see `zlib.compress` for details.
-             num_workers (int): will split the final big byte representation in that
-                 many chunks processed in parallel.
-         """
-         out = io.BytesIO()
-         torch.save(self.get_quantized_state(), out)
-         ms = _parallel_compress_len(out.getvalue(), compress_level, num_workers)
-         return ms / 2 ** 20
-
-
- def _compress_len(data, compress_level):
-     return len(zlib.compress(data, level=compress_level))
-
-
- def _parallel_compress_len(data, compress_level, num_workers):
-     num_workers = min(cpu_count(), num_workers)
-     chunk_size = int(math.ceil(len(data) / num_workers))
-     chunks = [data[offset:offset + chunk_size] for offset in range(0, len(data), chunk_size)]
-     with futures.ProcessPoolExecutor(num_workers) as pool:
-         return sum(pool.map(partial(_compress_len, compress_level=compress_level), chunks))
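
As a usage sketch, `BaseQuantizer` only needs the two `_quantize_param` / `_unquantize_param` hooks filled in. The toy float16 round-trip below is illustrative only; diffq's real quantizers implement proper bit-level coding, so treat the subclass and sizes here as assumptions, not the library's actual behavior.

import torch
from torch import nn

class HalfQuantizer(BaseQuantizer):
    # Toy hooks: "quantize" a tracked parameter by casting to float16.
    def _quantize_param(self, qparam):
        return qparam.param.data.half()

    def _unquantize_param(self, qparam, quantized):
        return quantized.float()

model = nn.Sequential(nn.Linear(1024, 4096), nn.ReLU(), nn.Linear(4096, 256))
quantizer = HalfQuantizer(model, min_size=0.01)  # params under ~0.01 MB are left untouched
quantizer.quantize()      # swaps in degraded weights, keeping a full-precision CPU copy
quantizer.unquantize()    # restores the saved full-precision weights
# zlib-compressed size of the quantized state, in MB; run under
# `if __name__ == '__main__':` on platforms that spawn worker processes.
print(quantizer.compressed_model_size())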
 
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/platforms/egl.py DELETED
@@ -1,219 +0,0 @@
- import ctypes
- import os
-
- import OpenGL.platform
-
- from .base import Platform
-
- EGL_PLATFORM_DEVICE_EXT = 0x313F
- EGL_DRM_DEVICE_FILE_EXT = 0x3233
-
-
- def _ensure_egl_loaded():
-     plugin = OpenGL.platform.PlatformPlugin.by_name('egl')
-     if plugin is None:
-         raise RuntimeError("EGL platform plugin is not available.")
-
-     plugin_class = plugin.load()
-     plugin.loaded = True
-     # create instance of this platform implementation
-     plugin = plugin_class()
-
-     plugin.install(vars(OpenGL.platform))
-
-
- _ensure_egl_loaded()
- from OpenGL import EGL as egl
-
-
- def _get_egl_func(func_name, res_type, *arg_types):
-     address = egl.eglGetProcAddress(func_name)
-     if address is None:
-         return None
-
-     proto = ctypes.CFUNCTYPE(res_type)
-     proto.argtypes = arg_types
-     func = proto(address)
-     return func
-
-
- def _get_egl_struct(struct_name):
-     from OpenGL._opaque import opaque_pointer_cls
-     return opaque_pointer_cls(struct_name)
-
-
- # These are not defined in PyOpenGL by default.
- _EGLDeviceEXT = _get_egl_struct('EGLDeviceEXT')
- _eglGetPlatformDisplayEXT = _get_egl_func('eglGetPlatformDisplayEXT', egl.EGLDisplay)
- _eglQueryDevicesEXT = _get_egl_func('eglQueryDevicesEXT', egl.EGLBoolean)
- _eglQueryDeviceStringEXT = _get_egl_func('eglQueryDeviceStringEXT', ctypes.c_char_p)
-
-
- def query_devices():
-     if _eglQueryDevicesEXT is None:
-         raise RuntimeError("EGL query extension is not loaded or is not supported.")
-
-     num_devices = egl.EGLint()
-     success = _eglQueryDevicesEXT(0, None, ctypes.pointer(num_devices))
-     if not success or num_devices.value < 1:
-         return []
-
-     devices = (_EGLDeviceEXT * num_devices.value)()  # array of size num_devices
-     success = _eglQueryDevicesEXT(num_devices.value, devices, ctypes.pointer(num_devices))
-     if not success or num_devices.value < 1:
-         return []
-
-     return [EGLDevice(devices[i]) for i in range(num_devices.value)]
-
-
- def get_default_device():
-     # Fall back to not using query extension.
-     if _eglQueryDevicesEXT is None:
-         return EGLDevice(None)
-
-     return query_devices()[0]
-
-
- def get_device_by_index(device_id):
-     if _eglQueryDevicesEXT is None and device_id == 0:
-         return get_default_device()
-
-     devices = query_devices()
-     if device_id >= len(devices):
-         raise ValueError('Invalid device ID ({} >= {})'.format(device_id, len(devices)))
-     return devices[device_id]
-
-
- class EGLDevice:
-
-     def __init__(self, display=None):
-         self._display = display
-
-     def get_display(self):
-         if self._display is None:
-             return egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)
-
-         return _eglGetPlatformDisplayEXT(EGL_PLATFORM_DEVICE_EXT, self._display, None)
-
-     @property
-     def name(self):
-         if self._display is None:
-             return 'default'
-
-         name = _eglQueryDeviceStringEXT(self._display, EGL_DRM_DEVICE_FILE_EXT)
-         if name is None:
-             return None
-
-         return name.decode('ascii')
-
-     def __repr__(self):
-         return "<EGLDevice(name={})>".format(self.name)
-
-
- class EGLPlatform(Platform):
-     """Renders using EGL.
-     """
-
-     def __init__(self, viewport_width, viewport_height, device: EGLDevice = None):
-         super(EGLPlatform, self).__init__(viewport_width, viewport_height)
-         if device is None:
-             device = get_default_device()
-
-         self._egl_device = device
-         self._egl_display = None
-         self._egl_context = None
-
-     def init_context(self):
-         _ensure_egl_loaded()
-
-         from OpenGL.EGL import (
-             EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_BLUE_SIZE,
-             EGL_RED_SIZE, EGL_GREEN_SIZE, EGL_DEPTH_SIZE,
-             EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER,
-             EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_CONFORMANT,
-             EGL_NONE, EGL_DEFAULT_DISPLAY, EGL_NO_CONTEXT,
-             EGL_OPENGL_API, EGL_CONTEXT_MAJOR_VERSION,
-             EGL_CONTEXT_MINOR_VERSION,
-             EGL_CONTEXT_OPENGL_PROFILE_MASK,
-             EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT,
-             eglGetDisplay, eglInitialize, eglChooseConfig,
-             eglBindAPI, eglCreateContext, EGLConfig
-         )
-         from OpenGL import arrays
-
-         config_attributes = arrays.GLintArray.asArray([
-             EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
-             EGL_BLUE_SIZE, 8,
-             EGL_RED_SIZE, 8,
-             EGL_GREEN_SIZE, 8,
-             EGL_DEPTH_SIZE, 24,
-             EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER,
-             EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
-             EGL_CONFORMANT, EGL_OPENGL_BIT,
-             EGL_NONE
-         ])
-         context_attributes = arrays.GLintArray.asArray([
-             EGL_CONTEXT_MAJOR_VERSION, 4,
-             EGL_CONTEXT_MINOR_VERSION, 1,
-             EGL_CONTEXT_OPENGL_PROFILE_MASK,
-             EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT,
-             EGL_NONE
-         ])
-         major, minor = ctypes.c_long(), ctypes.c_long()
-         num_configs = ctypes.c_long()
-         configs = (EGLConfig * 1)()
-
-         # Cache DISPLAY if necessary and get an off-screen EGL display
-         orig_dpy = None
-         if 'DISPLAY' in os.environ:
-             orig_dpy = os.environ['DISPLAY']
-             del os.environ['DISPLAY']
-
-         self._egl_display = self._egl_device.get_display()
-         if orig_dpy is not None:
-             os.environ['DISPLAY'] = orig_dpy
-
-         # Initialize EGL
-         assert eglInitialize(self._egl_display, major, minor)
-         assert eglChooseConfig(
-             self._egl_display, config_attributes, configs, 1, num_configs
-         )
-
-         # Bind EGL to the OpenGL API
-         assert eglBindAPI(EGL_OPENGL_API)
-
-         # Create an EGL context
-         self._egl_context = eglCreateContext(
-             self._egl_display, configs[0],
-             EGL_NO_CONTEXT, context_attributes
-         )
-
-         # Make it current
-         self.make_current()
-
-     def make_current(self):
-         from OpenGL.EGL import eglMakeCurrent, EGL_NO_SURFACE
-         assert eglMakeCurrent(
-             self._egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
-             self._egl_context
-         )
-
-     def make_uncurrent(self):
-         """Make the OpenGL context uncurrent.
-         """
-         pass
-
-     def delete_context(self):
-         from OpenGL.EGL import eglDestroyContext, eglTerminate
-         if self._egl_display is not None:
-             if self._egl_context is not None:
-                 eglDestroyContext(self._egl_display, self._egl_context)
-                 self._egl_context = None
-             eglTerminate(self._egl_display)
-             self._egl_display = None
-
-     def supports_framebuffers(self):
-         return True
-
-
- __all__ = ['EGLPlatform']
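
A minimal headless usage sketch of the module above. It assumes an EGL-capable GPU driver is installed and the process can see a device; everything it calls (query_devices, get_default_device, EGLPlatform and its methods) is defined in the file shown.

# Enumerate GPUs via the EGL device extension, then render off-screen.
for dev in query_devices():
    print(dev)                      # e.g. <EGLDevice(name=/dev/dri/card0)>

platform = EGLPlatform(viewport_width=640, viewport_height=480,
                       device=get_default_device())
platform.init_context()            # builds an OpenGL 4.1 core context; no X server required
platform.make_current()
# ... issue OpenGL / pyrender offscreen rendering calls here ...
platform.delete_context()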
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/glow_modules.py DELETED
@@ -1,362 +0,0 @@
- import scipy
- from torch.nn import functional as F
- import torch
- from torch import nn
- import numpy as np
- from modules.commons.wavenet import WN
- from modules.glow import utils
-
-
- class ActNorm(nn.Module):
-     def __init__(self, channels, ddi=False, **kwargs):
-         super().__init__()
-         self.channels = channels
-         self.initialized = not ddi
-
-         self.logs = nn.Parameter(torch.zeros(1, channels, 1))
-         self.bias = nn.Parameter(torch.zeros(1, channels, 1))
-
-     def forward(self, x, x_mask=None, reverse=False, **kwargs):
-         if x_mask is None:
-             x_mask = torch.ones(x.size(0), 1, x.size(2)).to(device=x.device, dtype=x.dtype)
-         x_len = torch.sum(x_mask, [1, 2])
-         if not self.initialized:
-             self.initialize(x, x_mask)
-             self.initialized = True
-
-         if reverse:
-             z = (x - self.bias) * torch.exp(-self.logs) * x_mask
-             logdet = torch.sum(-self.logs) * x_len
-         else:
-             z = (self.bias + torch.exp(self.logs) * x) * x_mask
-             logdet = torch.sum(self.logs) * x_len  # [b]
-         return z, logdet
-
-     def store_inverse(self):
-         pass
-
-     def set_ddi(self, ddi):
-         self.initialized = not ddi
-
-     def initialize(self, x, x_mask):
-         with torch.no_grad():
-             denom = torch.sum(x_mask, [0, 2])
-             m = torch.sum(x * x_mask, [0, 2]) / denom
-             m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom
-             v = m_sq - (m ** 2)
-             logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6))
-
-             bias_init = (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype)
-             logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype)
-
-             self.bias.data.copy_(bias_init)
-             self.logs.data.copy_(logs_init)
-
-
- class InvConvNear(nn.Module):
-     def __init__(self, channels, n_split=4, no_jacobian=False, lu=True, n_sqz=2, **kwargs):
-         super().__init__()
-         assert (n_split % 2 == 0)
-         self.channels = channels
-         self.n_split = n_split
-         self.n_sqz = n_sqz
-         self.no_jacobian = no_jacobian
-
-         w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).normal_())[0]
-         if torch.det(w_init) < 0:
-             w_init[:, 0] = -1 * w_init[:, 0]
-         self.lu = lu
-         if lu:
-             # LU decomposition can slightly speed up the inverse
-             np_p, np_l, np_u = scipy.linalg.lu(w_init)
-             np_s = np.diag(np_u)
-             np_sign_s = np.sign(np_s)
-             np_log_s = np.log(np.abs(np_s))
-             np_u = np.triu(np_u, k=1)
-             l_mask = np.tril(np.ones(w_init.shape, dtype=float), -1)
-             eye = np.eye(*w_init.shape, dtype=float)
-
-             self.register_buffer('p', torch.Tensor(np_p.astype(float)))
-             self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float)))
-             self.l = nn.Parameter(torch.Tensor(np_l.astype(float)), requires_grad=True)
-             self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)), requires_grad=True)
-             self.u = nn.Parameter(torch.Tensor(np_u.astype(float)), requires_grad=True)
-             self.register_buffer('l_mask', torch.Tensor(l_mask))
-             self.register_buffer('eye', torch.Tensor(eye))
-         else:
-             self.weight = nn.Parameter(w_init)
-
-     def forward(self, x, x_mask=None, reverse=False, **kwargs):
-         b, c, t = x.size()
-         assert (c % self.n_split == 0)
-         if x_mask is None:
-             x_mask = 1
-             x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
-         else:
-             x_len = torch.sum(x_mask, [1, 2])
-
-         x = x.view(b, self.n_sqz, c // self.n_split, self.n_split // self.n_sqz, t)
-         x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t)
-
-         if self.lu:
-             self.weight, log_s = self._get_weight()
-             logdet = log_s.sum()
-             logdet = logdet * (c / self.n_split) * x_len
-         else:
-             logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len  # [b]
-
-         if reverse:
-             if hasattr(self, "weight_inv"):
-                 weight = self.weight_inv
-             else:
-                 weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype)
-             logdet = -logdet
-         else:
-             weight = self.weight
-             if self.no_jacobian:
-                 logdet = 0
-
-         weight = weight.view(self.n_split, self.n_split, 1, 1)
-         z = F.conv2d(x, weight)
-
-         z = z.view(b, self.n_sqz, self.n_split // self.n_sqz, c // self.n_split, t)
-         z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask
-         return z, logdet
-
-     def _get_weight(self):
-         l, log_s, u = self.l, self.log_s, self.u
-         l = l * self.l_mask + self.eye
-         u = u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(log_s))
-         weight = torch.matmul(self.p, torch.matmul(l, u))
-         return weight, log_s
-
-     def store_inverse(self):
-         weight, _ = self._get_weight()
-         self.weight_inv = torch.inverse(weight.float()).to(next(self.parameters()).device)
-
-
- class InvConv(nn.Module):
-     def __init__(self, channels, no_jacobian=False, lu=True, **kwargs):
-         super().__init__()
-         w_shape = [channels, channels]
-         w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(float)
-         LU_decomposed = lu
-         if not LU_decomposed:
-             # Sample a random orthogonal matrix:
-             self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
-         else:
-             np_p, np_l, np_u = scipy.linalg.lu(w_init)
-             np_s = np.diag(np_u)
-             np_sign_s = np.sign(np_s)
-             np_log_s = np.log(np.abs(np_s))
-             np_u = np.triu(np_u, k=1)
-             l_mask = np.tril(np.ones(w_shape, dtype=float), -1)
-             eye = np.eye(*w_shape, dtype=float)
-
-             self.register_buffer('p', torch.Tensor(np_p.astype(float)))
-             self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float)))
-             self.l = nn.Parameter(torch.Tensor(np_l.astype(float)))
-             self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)))
-             self.u = nn.Parameter(torch.Tensor(np_u.astype(float)))
-             self.l_mask = torch.Tensor(l_mask)
-             self.eye = torch.Tensor(eye)
-         self.w_shape = w_shape
-         self.LU = LU_decomposed
-         self.weight = None
-
-     def get_weight(self, device, reverse):
-         w_shape = self.w_shape
-         self.p = self.p.to(device)
-         self.sign_s = self.sign_s.to(device)
-         self.l_mask = self.l_mask.to(device)
-         self.eye = self.eye.to(device)
-         l = self.l * self.l_mask + self.eye
-         u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s))
-         dlogdet = self.log_s.sum()
-         if not reverse:
-             w = torch.matmul(self.p, torch.matmul(l, u))
-         else:
-             l = torch.inverse(l.double()).float()
-             u = torch.inverse(u.double()).float()
-             w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
-         return w.view(w_shape[0], w_shape[1], 1), dlogdet
-
-     def forward(self, x, x_mask=None, reverse=False, **kwargs):
-         """
-         log-det = log|abs(|W|)| * pixels
-         """
-         b, c, t = x.size()
-         if x_mask is None:
-             x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
-         else:
-             x_len = torch.sum(x_mask, [1, 2])
-         logdet = 0
-         if not reverse:
-             weight, dlogdet = self.get_weight(x.device, reverse)
-             z = F.conv1d(x, weight)
-             if logdet is not None:
-                 logdet = logdet + dlogdet * x_len
-             return z, logdet
-         else:
-             if self.weight is None:
-                 weight, dlogdet = self.get_weight(x.device, reverse)
-             else:
-                 weight, dlogdet = self.weight, self.dlogdet
-             z = F.conv1d(x, weight)
-             if logdet is not None:
-                 logdet = logdet - dlogdet * x_len
-             return z, logdet
-
-     def store_inverse(self):
-         self.weight, self.dlogdet = self.get_weight('cuda', reverse=True)
-
-
- class CouplingBlock(nn.Module):
-     def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, n_layers,
-                  gin_channels=0, p_dropout=0, sigmoid_scale=False, wn=None):
-         super().__init__()
-         self.in_channels = in_channels
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.gin_channels = gin_channels
-         self.p_dropout = p_dropout
-         self.sigmoid_scale = sigmoid_scale
-
-         start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1)
-         start = torch.nn.utils.weight_norm(start)
-         self.start = start
-         # Initializing last layer to 0 makes the affine coupling layers
-         # do nothing at first. This helps with training stability
-         end = torch.nn.Conv1d(hidden_channels, in_channels, 1)
-         end.weight.data.zero_()
-         end.bias.data.zero_()
-         self.end = end
-         self.wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels, p_dropout)
-         if wn is not None:
-             self.wn.in_layers = wn.in_layers
-             self.wn.res_skip_layers = wn.res_skip_layers
-
-     def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs):
-         if x_mask is None:
-             x_mask = 1
-         x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:]
-
-         x = self.start(x_0) * x_mask
-         x = self.wn(x, x_mask, g)
-         out = self.end(x)
-
-         z_0 = x_0
-         m = out[:, :self.in_channels // 2, :]
-         logs = out[:, self.in_channels // 2:, :]
-         if self.sigmoid_scale:
-             logs = torch.log(1e-6 + torch.sigmoid(logs + 2))
-         if reverse:
-             z_1 = (x_1 - m) * torch.exp(-logs) * x_mask
-             logdet = torch.sum(-logs * x_mask, [1, 2])
-         else:
-             z_1 = (m + torch.exp(logs) * x_1) * x_mask
-             logdet = torch.sum(logs * x_mask, [1, 2])
-         z = torch.cat([z_0, z_1], 1)
-         return z, logdet
-
-     def store_inverse(self):
-         self.wn.remove_weight_norm()
-
-
- class Glow(nn.Module):
-     def __init__(self,
-                  in_channels,
-                  hidden_channels,
-                  kernel_size,
-                  dilation_rate,
-                  n_blocks,
-                  n_layers,
-                  p_dropout=0.,
-                  n_split=4,
-                  n_sqz=2,
-                  sigmoid_scale=False,
-                  gin_channels=0,
-                  inv_conv_type='near',
-                  share_cond_layers=False,
-                  share_wn_layers=0,
-                  ):
-         super().__init__()
-
-         self.in_channels = in_channels
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_blocks = n_blocks
-         self.n_layers = n_layers
-         self.p_dropout = p_dropout
-         self.n_split = n_split
-         self.n_sqz = n_sqz
-         self.sigmoid_scale = sigmoid_scale
-         self.gin_channels = gin_channels
-         self.share_cond_layers = share_cond_layers
-         if gin_channels != 0 and share_cond_layers:
-             cond_layer = torch.nn.Conv1d(gin_channels * n_sqz, 2 * hidden_channels * n_layers, 1)
-             self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-         wn = None
-         self.flows = nn.ModuleList()
-         for b in range(n_blocks):
-             self.flows.append(ActNorm(channels=in_channels * n_sqz))
-             if inv_conv_type == 'near':
-                 self.flows.append(InvConvNear(channels=in_channels * n_sqz, n_split=n_split, n_sqz=n_sqz))
-             if inv_conv_type == 'invconv':
-                 self.flows.append(InvConv(channels=in_channels * n_sqz))
-             if share_wn_layers > 0:
-                 if b % share_wn_layers == 0:
-                     wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels * n_sqz,
-                             p_dropout, share_cond_layers)
-             self.flows.append(
-                 CouplingBlock(
-                     in_channels * n_sqz,
-                     hidden_channels,
-                     kernel_size=kernel_size,
-                     dilation_rate=dilation_rate,
-                     n_layers=n_layers,
-                     gin_channels=gin_channels * n_sqz,
-                     p_dropout=p_dropout,
-                     sigmoid_scale=sigmoid_scale,
-                     wn=wn
-                 ))
-
-     def forward(self, x, x_mask=None, g=None, reverse=False, return_hiddens=False):
-         logdet_tot = 0
-         if not reverse:
-             flows = self.flows
-         else:
-             flows = reversed(self.flows)
-         if return_hiddens:
-             hs = []
-         if self.n_sqz > 1:
-             x, x_mask_ = utils.squeeze(x, x_mask, self.n_sqz)
-             if g is not None:
-                 g, _ = utils.squeeze(g, x_mask, self.n_sqz)
-             x_mask = x_mask_
-         if self.share_cond_layers and g is not None:
-             g = self.cond_layer(g)
-         for f in flows:
-             x, logdet = f(x, x_mask, g=g, reverse=reverse)
-             if return_hiddens:
-                 hs.append(x)
-             logdet_tot += logdet
-         if self.n_sqz > 1:
-             x, x_mask = utils.unsqueeze(x, x_mask, self.n_sqz)
-         if return_hiddens:
-             return x, logdet_tot, hs
-         return x, logdet_tot
-
-     def store_inverse(self):
-         def remove_weight_norm(m):
-             try:
-                 nn.utils.remove_weight_norm(m)
-             except ValueError:  # this module didn't have weight norm
-                 return
-
-         self.apply(remove_weight_norm)
-         for f in self.flows:
-             f.store_inverse()
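
A round-trip sketch of the Glow stack above, with hypothetical sizes; it assumes the WN and utils imports at the top of the file resolve. With the defaults n_sqz=2 and n_split=4, the time axis must be divisible by n_sqz and in_channels * n_sqz by n_split.

import torch

glow = Glow(in_channels=80, hidden_channels=192, kernel_size=3,
            dilation_rate=1, n_blocks=4, n_layers=3)
x = torch.randn(2, 80, 100)               # [batch, channels, frames]
x_mask = torch.ones(2, 1, 100)
z, logdet = glow(x, x_mask)               # forward: data -> latent, plus per-sample log-determinant
glow.store_inverse()                      # cache inverse weights for fast decoding
x_rec, _ = glow(z, x_mask, reverse=True)  # latent -> data; x_rec should closely match x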
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/x_transformer.py DELETED
@@ -1,641 +0,0 @@
- """shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers"""
- import torch
- from torch import nn, einsum
- import torch.nn.functional as F
- from functools import partial
- from inspect import isfunction
- from collections import namedtuple
- from einops import rearrange, repeat, reduce
-
- # constants
-
- DEFAULT_DIM_HEAD = 64
-
- Intermediates = namedtuple('Intermediates', [
-     'pre_softmax_attn',
-     'post_softmax_attn'
- ])
-
- LayerIntermediates = namedtuple('Intermediates', [
-     'hiddens',
-     'attn_intermediates'
- ])
-
-
- class AbsolutePositionalEmbedding(nn.Module):
-     def __init__(self, dim, max_seq_len):
-         super().__init__()
-         self.emb = nn.Embedding(max_seq_len, dim)
-         self.init_()
-
-     def init_(self):
-         nn.init.normal_(self.emb.weight, std=0.02)
-
-     def forward(self, x):
-         n = torch.arange(x.shape[1], device=x.device)
-         return self.emb(n)[None, :, :]
-
-
- class FixedPositionalEmbedding(nn.Module):
-     def __init__(self, dim):
-         super().__init__()
-         inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
-         self.register_buffer('inv_freq', inv_freq)
-
-     def forward(self, x, seq_dim=1, offset=0):
-         t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
-         sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
-         emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
-         return emb[None, :, :]
-
-
- # helpers
-
- def exists(val):
-     return val is not None
-
-
- def default(val, d):
-     if exists(val):
-         return val
-     return d() if isfunction(d) else d
-
-
- def always(val):
-     def inner(*args, **kwargs):
-         return val
-     return inner
-
-
- def not_equals(val):
-     def inner(x):
-         return x != val
-     return inner
-
-
- def equals(val):
-     def inner(x):
-         return x == val
-     return inner
-
-
- def max_neg_value(tensor):
-     return -torch.finfo(tensor.dtype).max
-
-
- # keyword argument helpers
-
- def pick_and_pop(keys, d):
-     values = list(map(lambda key: d.pop(key), keys))
-     return dict(zip(keys, values))
-
-
- def group_dict_by_key(cond, d):
-     return_val = [dict(), dict()]
-     for key in d.keys():
-         match = bool(cond(key))
-         ind = int(not match)
-         return_val[ind][key] = d[key]
-     return (*return_val,)
-
-
- def string_begins_with(prefix, str):
-     return str.startswith(prefix)
-
-
- def group_by_key_prefix(prefix, d):
-     return group_dict_by_key(partial(string_begins_with, prefix), d)
-
-
- def groupby_prefix_and_trim(prefix, d):
-     kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
-     kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
-     return kwargs_without_prefix, kwargs
-
-
- # classes
- class Scale(nn.Module):
-     def __init__(self, value, fn):
-         super().__init__()
-         self.value = value
-         self.fn = fn
-
-     def forward(self, x, **kwargs):
-         x, *rest = self.fn(x, **kwargs)
-         return (x * self.value, *rest)
-
-
- class Rezero(nn.Module):
-     def __init__(self, fn):
-         super().__init__()
-         self.fn = fn
-         self.g = nn.Parameter(torch.zeros(1))
-
-     def forward(self, x, **kwargs):
-         x, *rest = self.fn(x, **kwargs)
-         return (x * self.g, *rest)
-
-
- class ScaleNorm(nn.Module):
-     def __init__(self, dim, eps=1e-5):
-         super().__init__()
-         self.scale = dim ** -0.5
-         self.eps = eps
-         self.g = nn.Parameter(torch.ones(1))
-
-     def forward(self, x):
-         norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
-         return x / norm.clamp(min=self.eps) * self.g
-
-
- class RMSNorm(nn.Module):
-     def __init__(self, dim, eps=1e-8):
-         super().__init__()
-         self.scale = dim ** -0.5
-         self.eps = eps
-         self.g = nn.Parameter(torch.ones(dim))
-
-     def forward(self, x):
-         norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
-         return x / norm.clamp(min=self.eps) * self.g
-
-
- class Residual(nn.Module):
-     def forward(self, x, residual):
-         return x + residual
-
-
- class GRUGating(nn.Module):
-     def __init__(self, dim):
-         super().__init__()
-         self.gru = nn.GRUCell(dim, dim)
-
-     def forward(self, x, residual):
-         gated_output = self.gru(
-             rearrange(x, 'b n d -> (b n) d'),
-             rearrange(residual, 'b n d -> (b n) d')
-         )
-
-         return gated_output.reshape_as(x)
-
-
- # feedforward
-
- class GEGLU(nn.Module):
-     def __init__(self, dim_in, dim_out):
-         super().__init__()
-         self.proj = nn.Linear(dim_in, dim_out * 2)
-
-     def forward(self, x):
-         x, gate = self.proj(x).chunk(2, dim=-1)
-         return x * F.gelu(gate)
-
-
- class FeedForward(nn.Module):
-     def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
-         super().__init__()
-         inner_dim = int(dim * mult)
-         dim_out = default(dim_out, dim)
-         project_in = nn.Sequential(
-             nn.Linear(dim, inner_dim),
-             nn.GELU()
-         ) if not glu else GEGLU(dim, inner_dim)
-
-         self.net = nn.Sequential(
-             project_in,
-             nn.Dropout(dropout),
-             nn.Linear(inner_dim, dim_out)
-         )
-
-     def forward(self, x):
-         return self.net(x)
-
-
- # attention.
- class Attention(nn.Module):
-     def __init__(
-             self,
-             dim,
-             dim_head=DEFAULT_DIM_HEAD,
-             heads=8,
-             causal=False,
-             mask=None,
-             talking_heads=False,
-             sparse_topk=None,
-             use_entmax15=False,
-             num_mem_kv=0,
-             dropout=0.,
-             on_attn=False
-     ):
-         super().__init__()
-         if use_entmax15:
-             raise NotImplementedError("Check out entmax activation instead of softmax activation!")
-         self.scale = dim_head ** -0.5
-         self.heads = heads
-         self.causal = causal
-         self.mask = mask
-
-         inner_dim = dim_head * heads
-
-         self.to_q = nn.Linear(dim, inner_dim, bias=False)
-         self.to_k = nn.Linear(dim, inner_dim, bias=False)
-         self.to_v = nn.Linear(dim, inner_dim, bias=False)
-         self.dropout = nn.Dropout(dropout)
-
-         # talking heads
-         self.talking_heads = talking_heads
-         if talking_heads:
-             self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
-             self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))
-
-         # explicit topk sparse attention
-         self.sparse_topk = sparse_topk
-
-         # entmax
-         #self.attn_fn = entmax15 if use_entmax15 else F.softmax
-         self.attn_fn = F.softmax
-
-         # add memory key / values
-         self.num_mem_kv = num_mem_kv
-         if num_mem_kv > 0:
-             self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
-             self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
-
-         # attention on attention
-         self.attn_on_attn = on_attn
-         self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)
-
-     def forward(
-             self,
-             x,
-             context=None,
-             mask=None,
-             context_mask=None,
-             rel_pos=None,
-             sinusoidal_emb=None,
-             prev_attn=None,
-             mem=None
-     ):
-         b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
-         kv_input = default(context, x)
-
-         q_input = x
-         k_input = kv_input
-         v_input = kv_input
-
-         if exists(mem):
-             k_input = torch.cat((mem, k_input), dim=-2)
-             v_input = torch.cat((mem, v_input), dim=-2)
-
-         if exists(sinusoidal_emb):
-             # in shortformer, the query would start at a position offset depending on the past cached memory
-             offset = k_input.shape[-2] - q_input.shape[-2]
-             q_input = q_input + sinusoidal_emb(q_input, offset=offset)
-             k_input = k_input + sinusoidal_emb(k_input)
-
-         q = self.to_q(q_input)
-         k = self.to_k(k_input)
-         v = self.to_v(v_input)
-
-         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
-
-         input_mask = None
-         if any(map(exists, (mask, context_mask))):
-             q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
-             k_mask = q_mask if not exists(context) else context_mask
-             k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
-             q_mask = rearrange(q_mask, 'b i -> b () i ()')
-             k_mask = rearrange(k_mask, 'b j -> b () () j')
-             input_mask = q_mask * k_mask
-
-         if self.num_mem_kv > 0:
-             mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
-             k = torch.cat((mem_k, k), dim=-2)
-             v = torch.cat((mem_v, v), dim=-2)
-             if exists(input_mask):
-                 input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
-
-         dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
-         mask_value = max_neg_value(dots)
-
-         if exists(prev_attn):
-             dots = dots + prev_attn
-
-         pre_softmax_attn = dots
-
-         if talking_heads:
-             dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
-
-         if exists(rel_pos):
-             dots = rel_pos(dots)
-
-         if exists(input_mask):
-             dots.masked_fill_(~input_mask, mask_value)
-             del input_mask
-
-         if self.causal:
-             i, j = dots.shape[-2:]
-             r = torch.arange(i, device=device)
-             mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
-             mask = F.pad(mask, (j - i, 0), value=False)
-             dots.masked_fill_(mask, mask_value)
-             del mask
-
-         if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
-             top, _ = dots.topk(self.sparse_topk, dim=-1)
-             vk = top[..., -1].unsqueeze(-1).expand_as(dots)
-             mask = dots < vk
-             dots.masked_fill_(mask, mask_value)
-             del mask
-
-         attn = self.attn_fn(dots, dim=-1)
-         post_softmax_attn = attn
-
-         attn = self.dropout(attn)
-
-         if talking_heads:
-             attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
-
-         out = einsum('b h i j, b h j d -> b h i d', attn, v)
-         out = rearrange(out, 'b h n d -> b n (h d)')
-
-         intermediates = Intermediates(
-             pre_softmax_attn=pre_softmax_attn,
-             post_softmax_attn=post_softmax_attn
-         )
-
-         return self.to_out(out), intermediates
-
-
- class AttentionLayers(nn.Module):
-     def __init__(
-             self,
-             dim,
-             depth,
-             heads=8,
-             causal=False,
-             cross_attend=False,
-             only_cross=False,
-             use_scalenorm=False,
-             use_rmsnorm=False,
-             use_rezero=False,
-             rel_pos_num_buckets=32,
-             rel_pos_max_distance=128,
-             position_infused_attn=False,
-             custom_layers=None,
-             sandwich_coef=None,
-             par_ratio=None,
-             residual_attn=False,
-             cross_residual_attn=False,
-             macaron=False,
-             pre_norm=True,
-             gate_residual=False,
-             **kwargs
-     ):
-         super().__init__()
-         ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
-         attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)
-
-         dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
-
-         self.dim = dim
-         self.depth = depth
-         self.layers = nn.ModuleList([])
-
-         self.has_pos_emb = position_infused_attn
-         self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
-         self.rotary_pos_emb = always(None)
-
-         assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
-         self.rel_pos = None
-
-         self.pre_norm = pre_norm
-
-         self.residual_attn = residual_attn
-         self.cross_residual_attn = cross_residual_attn
-
-         norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
-         norm_class = RMSNorm if use_rmsnorm else norm_class
-         norm_fn = partial(norm_class, dim)
-
-         norm_fn = nn.Identity if use_rezero else norm_fn
-         branch_fn = Rezero if use_rezero else None
-
-         if cross_attend and not only_cross:
-             default_block = ('a', 'c', 'f')
-         elif cross_attend and only_cross:
-             default_block = ('c', 'f')
-         else:
-             default_block = ('a', 'f')
-
-         if macaron:
-             default_block = ('f',) + default_block
-
-         if exists(custom_layers):
-             layer_types = custom_layers
-         elif exists(par_ratio):
-             par_depth = depth * len(default_block)
-             assert 1 < par_ratio <= par_depth, 'par ratio out of range'
-             default_block = tuple(filter(not_equals('f'), default_block))
-             par_attn = par_depth // par_ratio
-             depth_cut = par_depth * 2 // 3  # 2 / 3 attention layer cutoff suggested by PAR paper
-             par_width = (depth_cut + depth_cut // par_attn) // par_attn
-             assert len(default_block) <= par_width, 'default block is too large for par_ratio'
-             par_block = default_block + ('f',) * (par_width - len(default_block))
-             par_head = par_block * par_attn
-             layer_types = par_head + ('f',) * (par_depth - len(par_head))
-         elif exists(sandwich_coef):
-             assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
-             layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
-         else:
-             layer_types = default_block * depth
-
-         self.layer_types = layer_types
-         self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
-
-         for layer_type in self.layer_types:
-             if layer_type == 'a':
-                 layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
-             elif layer_type == 'c':
-                 layer = Attention(dim, heads=heads, **attn_kwargs)
-             elif layer_type == 'f':
-                 layer = FeedForward(dim, **ff_kwargs)
-                 layer = layer if not macaron else Scale(0.5, layer)
-             else:
-                 raise Exception(f'invalid layer type {layer_type}')
-
-             if isinstance(layer, Attention) and exists(branch_fn):
-                 layer = branch_fn(layer)
-
-             if gate_residual:
-                 residual_fn = GRUGating(dim)
-             else:
-                 residual_fn = Residual()
-
-             self.layers.append(nn.ModuleList([
-                 norm_fn(),
-                 layer,
-                 residual_fn
-             ]))
-
-     def forward(
-             self,
-             x,
-             context=None,
-             mask=None,
-             context_mask=None,
-             mems=None,
-             return_hiddens=False
-     ):
-         hiddens = []
-         intermediates = []
-         prev_attn = None
-         prev_cross_attn = None
-
-         mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
-
-         for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
-             is_last = ind == (len(self.layers) - 1)
-
-             if layer_type == 'a':
-                 hiddens.append(x)
-                 layer_mem = mems.pop(0)
-
-             residual = x
-
-             if self.pre_norm:
-                 x = norm(x)
-
-             if layer_type == 'a':
-                 out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
-                                    prev_attn=prev_attn, mem=layer_mem)
-             elif layer_type == 'c':
-                 out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
-             elif layer_type == 'f':
-                 out = block(x)
-
-             x = residual_fn(out, residual)
-
-             if layer_type in ('a', 'c'):
-                 intermediates.append(inter)
-
-             if layer_type == 'a' and self.residual_attn:
-                 prev_attn = inter.pre_softmax_attn
-             elif layer_type == 'c' and self.cross_residual_attn:
-                 prev_cross_attn = inter.pre_softmax_attn
-
-             if not self.pre_norm and not is_last:
-                 x = norm(x)
-
-         if return_hiddens:
-             intermediates = LayerIntermediates(
-                 hiddens=hiddens,
-                 attn_intermediates=intermediates
-             )
-
-             return x, intermediates
-
-         return x
-
-
- class Encoder(AttentionLayers):
-     def __init__(self, **kwargs):
-         assert 'causal' not in kwargs, 'cannot set causality on encoder'
-         super().__init__(causal=False, **kwargs)
-
-
-
- class TransformerWrapper(nn.Module):
-     def __init__(
-             self,
-             *,
-             num_tokens,
-             max_seq_len,
-             attn_layers,
-             emb_dim=None,
-             max_mem_len=0.,
-             emb_dropout=0.,
-             num_memory_tokens=None,
-             tie_embedding=False,
-             use_pos_emb=True
-     ):
-         super().__init__()
-         assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
-
-         dim = attn_layers.dim
-         emb_dim = default(emb_dim, dim)
-
-         self.max_seq_len = max_seq_len
-         self.max_mem_len = max_mem_len
-         self.num_tokens = num_tokens
-
-         self.token_emb = nn.Embedding(num_tokens, emb_dim)
-         self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
-                 use_pos_emb and not attn_layers.has_pos_emb) else always(0)
-         self.emb_dropout = nn.Dropout(emb_dropout)
-
-         self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
-         self.attn_layers = attn_layers
-         self.norm = nn.LayerNorm(dim)
-
-         self.init_()
-
-         self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
-
-         # memory tokens (like [cls]) from Memory Transformers paper
-         num_memory_tokens = default(num_memory_tokens, 0)
-         self.num_memory_tokens = num_memory_tokens
-         if num_memory_tokens > 0:
-             self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
-
-         # let funnel encoder know number of memory tokens, if specified
-         if hasattr(attn_layers, 'num_memory_tokens'):
-             attn_layers.num_memory_tokens = num_memory_tokens
594
-
595
- def init_(self):
596
- nn.init.normal_(self.token_emb.weight, std=0.02)
597
-
598
- def forward(
599
- self,
600
- x,
601
- return_embeddings=False,
602
- mask=None,
603
- return_mems=False,
604
- return_attn=False,
605
- mems=None,
606
- **kwargs
607
- ):
608
- b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
609
- x = self.token_emb(x)
610
- x += self.pos_emb(x)
611
- x = self.emb_dropout(x)
612
-
613
- x = self.project_emb(x)
614
-
615
- if num_mem > 0:
616
- mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
617
- x = torch.cat((mem, x), dim=1)
618
-
619
- # auto-handle masking after appending memory tokens
620
- if exists(mask):
621
- mask = F.pad(mask, (num_mem, 0), value=True)
622
-
623
- x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
624
- x = self.norm(x)
625
-
626
- mem, x = x[:, :num_mem], x[:, num_mem:]
627
-
628
- out = self.to_logits(x) if not return_embeddings else x
629
-
630
- if return_mems:
631
- hiddens = intermediates.hiddens
632
- new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
633
- new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
634
- return out, new_mems
635
-
636
- if return_attn:
637
- attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
638
- return out, attn_maps
639
-
640
- return out
641
-
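For reference, a minimal usage sketch of the `Encoder`/`TransformerWrapper` classes deleted above. The import path is hypothetical (it depends on where the module lives in a given project); the shapes follow the constructors shown in the diff.

```python
import torch
from x_transformer import Encoder, TransformerWrapper  # hypothetical import path

# Build a small encoder-only transformer over a 20k-token vocabulary.
model = TransformerWrapper(
    num_tokens=20000,                               # vocabulary size
    max_seq_len=1024,                               # length for absolute positional embeddings
    attn_layers=Encoder(dim=512, depth=6, heads=8),
)

tokens = torch.randint(0, 20000, (1, 1024))  # (batch, seq_len) token ids
logits = model(tokens)                       # -> (1, 1024, 20000)
```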
 
spaces/AIGText/GlyphControl/annotator/util.py DELETED
@@ -1,38 +0,0 @@
- import numpy as np
- import cv2
- import os
- 
- 
- annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts')
- 
- 
- def HWC3(x):
-     assert x.dtype == np.uint8
-     if x.ndim == 2:
-         x = x[:, :, None]
-     assert x.ndim == 3
-     H, W, C = x.shape
-     assert C == 1 or C == 3 or C == 4
-     if C == 3:
-         return x
-     if C == 1:
-         return np.concatenate([x, x, x], axis=2)
-     if C == 4:
-         color = x[:, :, 0:3].astype(np.float32)
-         alpha = x[:, :, 3:4].astype(np.float32) / 255.0
-         y = color * alpha + 255.0 * (1.0 - alpha)
-         y = y.clip(0, 255).astype(np.uint8)
-         return y
- 
- 
- def resize_image(input_image, resolution):
-     H, W, C = input_image.shape
-     H = float(H)
-     W = float(W)
-     k = float(resolution) / min(H, W)
-     H *= k
-     W *= k
-     H = int(np.round(H / 64.0)) * 64
-     W = int(np.round(W / 64.0)) * 64
-     img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
-     return img
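A quick usage sketch of `resize_image` with a dummy input (the 720x1280 array is the only assumption): it scales the short side to roughly the requested resolution and snaps both sides to multiples of 64, as many diffusion backbones require.

```python
import numpy as np

image = np.zeros((720, 1280, 3), dtype=np.uint8)  # dummy HxWxC uint8 input
resized = resize_image(image, 512)
print(resized.shape)  # (512, 896, 3): short side -> 512, both sides rounded to 64-multiples
```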
 
spaces/AIatUIUC/CodeLATS/executors/executor_types.py DELETED
@@ -1,20 +0,0 @@
- from typing import NamedTuple, List, Tuple
- from abc import ABC, abstractmethod
- 
- 
- class ExecuteResult(NamedTuple):
-     is_passing: bool
-     feedback: str
-     state: Tuple[bool, ...]
- 
- 
- class Executor(ABC):
-     @abstractmethod
-     def execute(self, func: str, tests: List[str], timeout: int = 5) -> ExecuteResult:
-         ...
- 
-     @abstractmethod
-     def evaluate(self, name: str, func: str, test: str, timeout: int = 5) -> bool:
-         ...
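A minimal concrete `Executor` sketch (hypothetical; the real project ships its own language-specific executors, and this toy version ignores `timeout`):

```python
from typing import List

class ToyPythonExecutor(Executor):
    """Hypothetical executor that runs a candidate function and its tests via exec()."""

    def evaluate(self, name: str, func: str, test: str, timeout: int = 5) -> bool:
        env: dict = {}
        try:
            exec(func, env)  # define the candidate function
            exec(test, env)  # run one assertion-style test against it
            return True
        except Exception:
            return False

    def execute(self, func: str, tests: List[str], timeout: int = 5) -> ExecuteResult:
        results = tuple(self.evaluate("candidate", func, t, timeout) for t in tests)
        feedback = f"{sum(results)}/{len(results)} tests passed"
        return ExecuteResult(is_passing=all(results), feedback=feedback, state=results)
```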
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/crowdhuman/yolov5_s-v61_fast_8xb16-300e_crowdhuman.py DELETED
@@ -1,47 +0,0 @@
- _base_ = '../yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
- 
- # Use the model trained on COCO as the pretrained model
- load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth'  # noqa
- 
- # dataset settings
- data_root = 'data/CrowdHuman/'
- dataset_type = 'YOLOv5CrowdHumanDataset'
- 
- # parameters that often need to be modified
- num_classes = 1
- 
- anchors = [
-     [(6, 14), (12, 28), (19, 48)],  # P3/8
-     [(29, 79), (46, 124), (142, 54)],  # P4/16
-     [(73, 198), (124, 330), (255, 504)]  # P5/32
- ]
- 
- model = dict(
-     bbox_head=dict(
-         head_module=dict(num_classes=num_classes),
-         prior_generator=dict(base_sizes=anchors)))
- 
- train_dataloader = dict(
-     dataset=dict(
-         type=dataset_type,
-         data_root=data_root,
-         ann_file='annotation_train.odgt',
-         data_prefix=dict(img='Images/')))
- 
- val_dataloader = dict(
-     dataset=dict(
-         type=dataset_type,
-         data_root=data_root,
-         ann_file='annotation_val.odgt',
-         data_prefix=dict(img='Images/'),
-         # CrowdHumanMetric does not support out-of-order output images
-         # for the time being, so batch_shapes_cfg is disabled.
-         batch_shapes_cfg=None))
- test_dataloader = val_dataloader
- 
- val_evaluator = dict(
-     _delete_=True,
-     type='mmdet.CrowdHumanMetric',
-     ann_file=data_root + 'annotation_val.odgt',
-     metric=['AP', 'MR', 'JI'])
- test_evaluator = val_evaluator
 
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/classroom.py DELETED
@@ -1,47 +0,0 @@
- from __future__ import annotations
- 
- from typing import TYPE_CHECKING, List
- 
- from agentverse.message import Message
- 
- from . import selector_registry as SelectorRegistry
- from .base import BaseSelector
- 
- if TYPE_CHECKING:
-     from agentverse.environments import BaseEnvironment
- 
- 
- @SelectorRegistry.register("classroom")
- class ClassroomSelector(BaseSelector):
-     def select_message(
-         self, environment: BaseEnvironment, messages: List[Message]
-     ) -> List[Message]:
-         selected = []
-         for message in messages:
-             if message.sender.startswith("Student"):
-                 if message.content.startswith("[RaiseHand]"):
-                     message.content = "[RaiseHand]"
-                     selected.append(message)
-                 elif message.content != "" or len(message.tool_response) > 0:
-                     selected.append(message)
-             elif message.sender.startswith("Professor"):
-                 # If the professor launches a group discussion, we simply
-                 # discard the students' messages in this turn
-                 if message.content.startswith("[GroupDiscuss]"):
-                     return [message]
-                 selected.append(message)
- 
-         # If a student speaks while the professor is speaking, we simply
-         # discard the student's message in this turn
-         if (
-             len(selected) > 1
-             and selected[0].sender.startswith("Professor")
-             and selected[0].content != ""
-         ):
-             filtered_selected = []
-             filtered_selected.append(selected[0])
-             for message in selected[1:]:
-                 if message.content.startswith("[RaiseHand]"):
-                     filtered_selected.append(message)
-             selected = filtered_selected
-         return selected
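A hedged example of the rule above, assuming `Message` accepts `sender` and `content` as keyword arguments and `BaseSelector` needs no constructor arguments (both assumptions not confirmed by this file):

```python
messages = [
    Message(sender="Professor Smith", content="[GroupDiscuss] Pair up and compare notes."),
    Message(sender="Student A", content="[RaiseHand] I have a question."),
]
# A [GroupDiscuss] message from the professor suppresses every student
# message in the same turn, so only the professor's message survives.
selected = ClassroomSelector().select_message(None, messages)  # environment unused here
assert len(selected) == 1 and selected[0].sender.startswith("Professor")
```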
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Custom.js DELETED
@@ -1,18 +0,0 @@
- import Base from '../base/Base.js';
- import ShapesUpdateMethods from '../../../plugins/gameobjects/shape/customshapes/ShapesUpdateMethods.js';
- 
- const GetValue = Phaser.Utils.Objects.GetValue;
- 
- class Custom extends Base {
-     constructor(scene, config) {
-         super(scene, config);
-         this.type = GetValue(config, 'type', 'rexSpinnerCustom');
-     }
- }
- 
- Object.assign(
-     Custom.prototype,
-     ShapesUpdateMethods
- );
- 
- export default Custom;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/SwipeCell.js DELETED
@@ -1,26 +0,0 @@
- import Swipe from '../../swipe/Swipe.js';
- import EmitCellEvent from './EmitCellEvent.js';
- 
- const GetValue = Phaser.Utils.Objects.GetValue;
- 
- var SwipeCell = function (table, tableConfig) {
-     var swipeConfig = GetValue(tableConfig, 'swipe', undefined);
-     if (swipeConfig === false) {
-         return;
-     } else if (swipeConfig === undefined) {
-         swipeConfig = {};
-     }
-     swipeConfig.dir = '4dir';
-     table._swipe = new Swipe(table, swipeConfig);
-     table._swipe
-         .on('swipe', function (swipe, gameObject, lastPointer) {
-             var dirName =
-                 (swipe.left) ? 'left' :
-                 (swipe.right) ? 'right' :
-                 (swipe.up) ? 'up' :
-                 'down';
-             EmitCellEvent(this.eventEmitter, `cell.swipe${dirName}`, table, swipe.worldX, swipe.worldY, lastPointer);
-         }, this);
- };
- 
- export default SwipeCell;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/Sides.js DELETED
@@ -1,84 +0,0 @@
- import OverlapSizer from '../overlapsizer/OverlapSizer.js';
- import IsFunction from '../../../plugins/utils/object/IsFunction.js';
- import GetDefaultCallbacks from './defaultcallbacks/GetDefaultCallbacks.js';
- import ShowChildMethods from './ShowChildMethods.js';
- import ChildBehaviorMethods from './childbehaviors/index.js';
- 
- const GetValue = Phaser.Utils.Objects.GetValue;
- 
- class Sides extends OverlapSizer {
-     constructor(scene, config) {
-         super(scene, config);
-         this.type = 'rexSides';
-         this.childrenMap = this.sizerChildren;
-         this.previousChildKey = undefined;
-         this.currentChildKey = undefined;
- 
-         // Callbacks
-         var showChildCallback = GetValue(config, 'showChildCallback', undefined);
-         if (showChildCallback) {  // Has showChildCallback and hideChildCallback
-             if (IsFunction(showChildCallback)) {  // Custom callbacks
-                 var showChildCallbackScope = GetValue(config, 'showChildCallbackScope', undefined);
-                 this.on('showchild', showChildCallback, showChildCallbackScope);
- 
-                 var hideChildCallback = GetValue(config, 'hideChildCallback', undefined);
-                 var hideChildCallbackScope = GetValue(config, 'hideChildCallbackScope', undefined);
-                 this.on('hidechild', hideChildCallback, hideChildCallbackScope);
-             } else {  // Default callbacks
-                 var defaultCallbacks = GetDefaultCallbacks(showChildCallback);
-                 this.on('showchild', defaultCallbacks.show);
-                 this.on('hidechild', defaultCallbacks.hide);
-             }
-         }
- 
-         // Add elements
-         var background = GetValue(config, 'background', undefined);
-         var panel = GetValue(config, 'panel', undefined);
-         var leftSide = GetValue(config, 'leftSide', undefined);
-         var rightSide = GetValue(config, 'rightSide', undefined);
-         var topSide = GetValue(config, 'topSide', undefined);
-         var bottomSide = GetValue(config, 'bottomSide', undefined);
- 
-         if (background) {
-             this.addBackground(background);
-         }
-         if (panel) {
-             this.add(panel, 'panel', 'center', 0, true);
-         }
-         if (leftSide) {
-             var expand = GetValue(config, 'expand.left', true);
-             this.add(leftSide, 'leftSide', 'left-top', 0, { height: expand });
-         }
-         if (rightSide) {
-             var expand = GetValue(config, 'expand.right', true);
-             this.add(rightSide, 'rightSide', 'right-top', 0, { height: expand });
-         }
-         if (topSide) {
-             var expand = GetValue(config, 'expand.top', true);
-             this.add(topSide, 'topSide', 'left-top', 0, { width: expand });
-         }
-         if (bottomSide) {
-             var expand = GetValue(config, 'expand.bottom', true);
-             this.add(bottomSide, 'bottomSide', 'left-bottom', 0, { width: expand });
-         }
-     }
- 
-     reset() {
-         this.previousChildKey = undefined;
-         this.currentChildKey = 'panel';
-         this.showChild('panel', true);
-         this.hideChild('leftSide', true);
-         this.hideChild('rightSide', true);
-         this.hideChild('topSide', true);
-         this.hideChild('bottomSide', true);
-         return this;
-     }
- }
- 
- Object.assign(
-     Sides.prototype,
-     ShowChildMethods,
-     ChildBehaviorMethods
- );
- 
- export default Sides;
 
spaces/Alpaca233/SadTalker/src/face3d/visualize.py DELETED
@@ -1,48 +0,0 @@
- # check the sync of 3dmm feature and the audio
- import cv2
- import numpy as np
- from src.face3d.models.bfm import ParametricFaceModel
- from src.face3d.models.facerecon_model import FaceReconModel
- import torch
- import subprocess, platform
- import scipy.io as scio
- from tqdm import tqdm
- 
- 
- # draft
- def gen_composed_video(args, device, first_frame_coeff, coeff_path, audio_path, save_path, exp_dim=64):
- 
-     coeff_first = scio.loadmat(first_frame_coeff)['full_3dmm']
- 
-     coeff_pred = scio.loadmat(coeff_path)['coeff_3dmm']
- 
-     coeff_full = np.repeat(coeff_first, coeff_pred.shape[0], axis=0)  # 257-dim full coefficients
- 
-     coeff_full[:, 80:144] = coeff_pred[:, 0:64]    # 64 dim expression
-     coeff_full[:, 224:227] = coeff_pred[:, 64:67]  # 3 dim rotation (Euler angles)
-     coeff_full[:, 254:] = coeff_pred[:, 67:]       # 3 dim translation
- 
-     tmp_video_path = '/tmp/face3dtmp.mp4'
- 
-     facemodel = FaceReconModel(args)
- 
-     video = cv2.VideoWriter(tmp_video_path, cv2.VideoWriter_fourcc(*'mp4v'), 25, (224, 224))
- 
-     for k in tqdm(range(coeff_pred.shape[0]), 'face3d rendering:'):
-         cur_coeff_full = torch.tensor(coeff_full[k:k + 1], device=device)
- 
-         facemodel.forward(cur_coeff_full, device)
- 
-         predicted_landmark = facemodel.pred_lm  # TODO.
-         predicted_landmark = predicted_landmark.cpu().numpy().squeeze()
- 
-         rendered_img = facemodel.pred_face
-         rendered_img = 255. * rendered_img.cpu().numpy().squeeze().transpose(1, 2, 0)
-         out_img = rendered_img[:, :, :3].astype(np.uint8)
- 
-         video.write(np.uint8(out_img[:, :, ::-1]))
- 
-     video.release()
- 
-     command = 'ffmpeg -v quiet -y -i {} -i {} -strict -2 -q:v 1 {}'.format(audio_path, tmp_video_path, save_path)
-     subprocess.call(command, shell=platform.system() != 'Windows')
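The slicing above appears to follow the common 257-dim Deep3DFaceRecon coefficient layout; the split below is an assumption for orientation, not something stated in this file.

```python
# Assumed 257-dim 3DMM coefficient layout (verify against the face model actually used):
ID_COEFF    = slice(0, 80)     # identity
EXP_COEFF   = slice(80, 144)   # expression (replaced per frame above)
TEX_COEFF   = slice(144, 224)  # texture
ANGLES      = slice(224, 227)  # rotation / Euler angles (replaced per frame above)
GAMMA       = slice(227, 254)  # spherical-harmonics lighting
TRANSLATION = slice(254, 257)  # translation (replaced per frame above)
```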
 
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/python/dqn/policies.py DELETED
@@ -1,237 +0,0 @@
- from typing import Any, Dict, List, Optional, Type
- 
- import gym
- import torch as th
- from torch import nn
- 
- from stable_baselines3.common.policies import BasePolicy, register_policy
- from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
- from stable_baselines3.common.type_aliases import Schedule
- 
- 
- class QNetwork(BasePolicy):
-     """
-     Action-Value (Q-Value) network for DQN
- 
-     :param observation_space: Observation space
-     :param action_space: Action space
-     :param net_arch: The specification of the policy and value networks.
-     :param activation_fn: Activation function
-     :param normalize_images: Whether to normalize images or not,
-         dividing by 255.0 (True by default)
-     """
- 
-     def __init__(
-         self,
-         observation_space: gym.spaces.Space,
-         action_space: gym.spaces.Space,
-         features_extractor: nn.Module,
-         features_dim: int,
-         net_arch: Optional[List[int]] = None,
-         activation_fn: Type[nn.Module] = nn.ReLU,
-         normalize_images: bool = True,
-     ):
-         super(QNetwork, self).__init__(
-             observation_space,
-             action_space,
-             features_extractor=features_extractor,
-             normalize_images=normalize_images,
-         )
- 
-         if net_arch is None:
-             net_arch = [64, 64]
- 
-         self.net_arch = net_arch
-         self.activation_fn = activation_fn
-         self.features_extractor = features_extractor
-         self.features_dim = features_dim
-         self.normalize_images = normalize_images
-         action_dim = self.action_space.n  # number of actions
-         q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
-         self.q_net = nn.Sequential(*q_net)
- 
-     def forward(self, obs: th.Tensor) -> th.Tensor:
-         """
-         Predict the q-values.
- 
-         :param obs: Observation
-         :return: The estimated Q-Value for each action.
-         """
-         return self.q_net(self.extract_features(obs))
- 
-     def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
-         q_values = self.forward(observation)
-         # Greedy action
-         action = q_values.argmax(dim=1).reshape(-1)
-         return action
- 
-     def _get_constructor_parameters(self) -> Dict[str, Any]:
-         data = super()._get_constructor_parameters()
- 
-         data.update(
-             dict(
-                 net_arch=self.net_arch,
-                 features_dim=self.features_dim,
-                 activation_fn=self.activation_fn,
-                 features_extractor=self.features_extractor,
-             )
-         )
-         return data
- 
- 
- class DQNPolicy(BasePolicy):
-     """
-     Policy class with Q-Value Net and target net for DQN
- 
-     :param observation_space: Observation space
-     :param action_space: Action space
-     :param lr_schedule: Learning rate schedule (could be constant)
-     :param net_arch: The specification of the policy and value networks.
-     :param activation_fn: Activation function
-     :param features_extractor_class: Features extractor to use.
-     :param features_extractor_kwargs: Keyword arguments
-         to pass to the features extractor.
-     :param normalize_images: Whether to normalize images or not,
-         dividing by 255.0 (True by default)
-     :param optimizer_class: The optimizer to use,
-         ``th.optim.Adam`` by default
-     :param optimizer_kwargs: Additional keyword arguments,
-         excluding the learning rate, to pass to the optimizer
-     """
- 
-     def __init__(
-         self,
-         observation_space: gym.spaces.Space,
-         action_space: gym.spaces.Space,
-         lr_schedule: Schedule,
-         net_arch: Optional[List[int]] = None,
-         activation_fn: Type[nn.Module] = nn.ReLU,
-         features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
-         features_extractor_kwargs: Optional[Dict[str, Any]] = None,
-         normalize_images: bool = True,
-         optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
-         optimizer_kwargs: Optional[Dict[str, Any]] = None,
-     ):
-         super(DQNPolicy, self).__init__(
-             observation_space,
-             action_space,
-             features_extractor_class,
-             features_extractor_kwargs,
-             optimizer_class=optimizer_class,
-             optimizer_kwargs=optimizer_kwargs,
-         )
- 
-         if net_arch is None:
-             if features_extractor_class == FlattenExtractor:
-                 net_arch = [64, 64]
-             else:
-                 net_arch = []
- 
-         self.net_arch = net_arch
-         self.activation_fn = activation_fn
-         self.normalize_images = normalize_images
- 
-         self.net_args = {
-             "observation_space": self.observation_space,
-             "action_space": self.action_space,
-             "net_arch": self.net_arch,
-             "activation_fn": self.activation_fn,
-             "normalize_images": normalize_images,
-         }
- 
-         self.q_net, self.q_net_target = None, None
-         self._build(lr_schedule)
- 
-     def _build(self, lr_schedule: Schedule) -> None:
-         """
-         Create the network and the optimizer.
- 
-         :param lr_schedule: Learning rate schedule
-             lr_schedule(1) is the initial learning rate
-         """
- 
-         self.q_net = self.make_q_net()
-         self.q_net_target = self.make_q_net()
-         self.q_net_target.load_state_dict(self.q_net.state_dict())
- 
-         # Setup optimizer with initial learning rate
-         self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
- 
-     def make_q_net(self) -> QNetwork:
-         # Make sure we always have separate networks for features extractors etc
-         net_args = self._update_features_extractor(self.net_args, features_extractor=None)
-         return QNetwork(**net_args).to(self.device)
- 
-     def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
-         return self._predict(obs, deterministic=deterministic)
- 
-     def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
-         return self.q_net._predict(obs, deterministic=deterministic)
- 
-     def _get_constructor_parameters(self) -> Dict[str, Any]:
-         data = super()._get_constructor_parameters()
- 
-         data.update(
-             dict(
-                 net_arch=self.net_args["net_arch"],
-                 activation_fn=self.net_args["activation_fn"],
-                 lr_schedule=self._dummy_schedule,  # dummy lr schedule, not needed for loading policy alone
-                 optimizer_class=self.optimizer_class,
-                 optimizer_kwargs=self.optimizer_kwargs,
-                 features_extractor_class=self.features_extractor_class,
-                 features_extractor_kwargs=self.features_extractor_kwargs,
-             )
-         )
-         return data
- 
- 
- MlpPolicy = DQNPolicy
- 
- 
- class CnnPolicy(DQNPolicy):
-     """
-     Policy class for DQN when using images as input.
- 
-     :param observation_space: Observation space
-     :param action_space: Action space
-     :param lr_schedule: Learning rate schedule (could be constant)
-     :param net_arch: The specification of the policy and value networks.
-     :param activation_fn: Activation function
-     :param features_extractor_class: Features extractor to use.
-     :param normalize_images: Whether to normalize images or not,
-         dividing by 255.0 (True by default)
-     :param optimizer_class: The optimizer to use,
-         ``th.optim.Adam`` by default
-     :param optimizer_kwargs: Additional keyword arguments,
-         excluding the learning rate, to pass to the optimizer
-     """
- 
-     def __init__(
-         self,
-         observation_space: gym.spaces.Space,
-         action_space: gym.spaces.Space,
-         lr_schedule: Schedule,
-         net_arch: Optional[List[int]] = None,
-         activation_fn: Type[nn.Module] = nn.ReLU,
-         features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
-         features_extractor_kwargs: Optional[Dict[str, Any]] = None,
-         normalize_images: bool = True,
-         optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
-         optimizer_kwargs: Optional[Dict[str, Any]] = None,
-     ):
-         super(CnnPolicy, self).__init__(
-             observation_space,
-             action_space,
-             lr_schedule,
-             net_arch,
-             activation_fn,
-             features_extractor_class,
-             features_extractor_kwargs,
-             normalize_images,
-             optimizer_class,
-             optimizer_kwargs,
-         )
- 
- 
- register_policy("MlpPolicy", MlpPolicy)
- register_policy("CnnPolicy", CnnPolicy)
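A hedged usage sketch for the policies above: constructing an `MlpPolicy` directly and querying a greedy action. Normally SB3's `DQN` algorithm class builds the policy and schedule for you; the constant schedule and `CartPole-v1` environment are assumptions for illustration.

```python
import gym
import torch as th

env = gym.make("CartPole-v1")
policy = MlpPolicy(
    observation_space=env.observation_space,
    action_space=env.action_space,
    lr_schedule=lambda progress_remaining: 1e-4,  # constant learning rate (assumption)
)

obs = th.as_tensor(env.reset(), dtype=th.float32).unsqueeze(0)  # add batch dimension
action = policy(obs)  # greedy action from the online Q-network
print(int(action.item()))
```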
 
spaces/Andres99/Tune-A-Video-Training-UI/app_upload.py DELETED
@@ -1,106 +0,0 @@
- #!/usr/bin/env python
- 
- from __future__ import annotations
- 
- import pathlib
- 
- import gradio as gr
- import slugify
- 
- from constants import MODEL_LIBRARY_ORG_NAME, UploadTarget
- from uploader import Uploader
- from utils import find_exp_dirs
- 
- 
- class ModelUploader(Uploader):
-     def upload_model(
-         self,
-         folder_path: str,
-         repo_name: str,
-         upload_to: str,
-         private: bool,
-         delete_existing_repo: bool,
-         input_token: str | None = None,
-     ) -> str:
-         if not folder_path:
-             raise ValueError
-         if not repo_name:
-             repo_name = pathlib.Path(folder_path).name
-         repo_name = slugify.slugify(repo_name)
- 
-         if upload_to == UploadTarget.PERSONAL_PROFILE.value:
-             organization = ''
-         elif upload_to == UploadTarget.MODEL_LIBRARY.value:
-             organization = MODEL_LIBRARY_ORG_NAME
-         else:
-             raise ValueError
- 
-         return self.upload(folder_path,
-                            repo_name,
-                            organization=organization,
-                            private=private,
-                            delete_existing_repo=delete_existing_repo,
-                            input_token=input_token)
- 
- 
- def load_local_model_list() -> dict:
-     choices = find_exp_dirs()
-     return gr.update(choices=choices, value=choices[0] if choices else None)
- 
- 
- def create_upload_demo(hf_token: str | None) -> gr.Blocks:
-     uploader = ModelUploader(hf_token)
-     model_dirs = find_exp_dirs()
- 
-     with gr.Blocks() as demo:
-         with gr.Box():
-             gr.Markdown('Local Models')
-             reload_button = gr.Button('Reload Model List')
-             model_dir = gr.Dropdown(
-                 label='Model names',
-                 choices=model_dirs,
-                 value=model_dirs[0] if model_dirs else None)
-         with gr.Box():
-             gr.Markdown('Upload Settings')
-             with gr.Row():
-                 use_private_repo = gr.Checkbox(label='Private', value=True)
-                 delete_existing_repo = gr.Checkbox(
-                     label='Delete existing repo of the same name', value=False)
-             upload_to = gr.Radio(label='Upload to',
-                                  choices=[_.value for _ in UploadTarget],
-                                  value=UploadTarget.MODEL_LIBRARY.value)
-             model_name = gr.Textbox(label='Model Name')
-             input_token = gr.Text(label='Hugging Face Write Token',
-                                   placeholder='',
-                                   visible=False if hf_token else True)
-             upload_button = gr.Button('Upload')
-             gr.Markdown(f'''
-             - You can upload your trained model to your personal profile (i.e. https://huggingface.co/{{your_username}}/{{model_name}}) or to the public [Tune-A-Video Library](https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}) (i.e. https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}/{{model_name}}).
-             ''')
-         with gr.Box():
-             gr.Markdown('Output message')
-             output_message = gr.Markdown()
- 
-         reload_button.click(fn=load_local_model_list,
-                             inputs=None,
-                             outputs=model_dir)
-         upload_button.click(fn=uploader.upload_model,
-                             inputs=[
-                                 model_dir,
-                                 model_name,
-                                 upload_to,
-                                 use_private_repo,
-                                 delete_existing_repo,
-                                 input_token,
-                             ],
-                             outputs=output_message)
- 
-     return demo
- 
- 
- if __name__ == '__main__':
-     import os
- 
-     hf_token = os.getenv('HF_TOKEN')
-     demo = create_upload_demo(hf_token)
-     demo.queue(max_size=1).launch(share=False)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/control_brightness.md DELETED
@@ -1,45 +0,0 @@
- # Control image brightness
- 
- The Stable Diffusion pipeline is mediocre at generating images that are either very bright or dark as explained in the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) paper. The solutions proposed in the paper are currently implemented in the [`DDIMScheduler`] which you can use to improve the lighting in your images.
- 
- <Tip>
- 
- 💡 Take a look at the paper linked above for more details about the proposed solutions!
- 
- </Tip>
- 
- One of the solutions is to train a model with *v prediction* and *v loss*. Add the following flag to the [`train_text_to_image.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [`train_text_to_image_lora.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) scripts to enable `v_prediction`:
- 
- ```bash
- --prediction_type="v_prediction"
- ```
- 
- For example, let's use the [`ptx0/pseudo-journey-v2`](https://huggingface.co/ptx0/pseudo-journey-v2) checkpoint which has been finetuned with `v_prediction`.
- 
- Next, configure the following parameters in the [`DDIMScheduler`]:
- 
- 1. `rescale_betas_zero_snr=True`, rescales the noise schedule to zero terminal signal-to-noise ratio (SNR)
- 2. `timestep_spacing="trailing"`, starts sampling from the last timestep
- 
- ```py
- >>> from diffusers import DiffusionPipeline, DDIMScheduler
- 
- >>> pipeline = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2")
- # switch the scheduler in the pipeline to use the DDIMScheduler
- 
- >>> pipeline.scheduler = DDIMScheduler.from_config(
- ...     pipeline.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
- ... )
- >>> pipeline.to("cuda")
- ```
- 
- Finally, in your call to the pipeline, set `guidance_rescale` to prevent overexposure:
- 
- ```py
- prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
- image = pipeline(prompt, guidance_rescale=0.7).images[0]
- ```
- 
- <div class="flex justify-center">
-     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/zero_snr.png"/>
- </div>
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/inpaint.md DELETED
@@ -1,76 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
- 
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
- 
- http://www.apache.org/licenses/LICENSE-2.0
- 
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
- 
- # Text-guided image-inpainting
- 
- [[open-in-colab]]
- 
- The [`StableDiffusionInpaintPipeline`] allows you to edit specific parts of an image by providing a mask and a text prompt. It uses a version of Stable Diffusion, like [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting) specifically trained for inpainting tasks.
- 
- Get started by loading an instance of the [`StableDiffusionInpaintPipeline`]:
- 
- ```python
- import PIL
- import requests
- import torch
- from io import BytesIO
- 
- from diffusers import StableDiffusionInpaintPipeline
- 
- pipeline = StableDiffusionInpaintPipeline.from_pretrained(
-     "runwayml/stable-diffusion-inpainting",
-     torch_dtype=torch.float16,
- )
- pipeline = pipeline.to("cuda")
- ```
- 
- Download an image and a mask of a dog which you'll eventually replace:
- 
- ```python
- def download_image(url):
-     response = requests.get(url)
-     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
- 
- 
- img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
- mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
- 
- init_image = download_image(img_url).resize((512, 512))
- mask_image = download_image(mask_url).resize((512, 512))
- ```
- 
- Now you can create a prompt to replace the mask with something else:
- 
- ```python
- prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
- image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
- ```
- 
- `image` | `mask_image` | `prompt` | output |
- :-------------------------:|:-------------------------:|:-------------------------:|-------------------------:|
- <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" alt="drawing" width="250"/> | <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" alt="drawing" width="250"/> | ***Face of a yellow cat, high resolution, sitting on a park bench*** | <img src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/in_paint/yellow_cat_sitting_on_a_park_bench.png" alt="drawing" width="250"/> |
- 
- <Tip warning={true}>
- 
- A previous experimental implementation of inpainting used a different, lower-quality process. To ensure backwards compatibility, loading a pretrained pipeline that doesn't contain the new model will still apply the old inpainting method.
- 
- </Tip>
- 
- Check out the Spaces below to try out image inpainting yourself!
- 
- <iframe
-     src="https://runwayml-stable-diffusion-inpainting.hf.space"
-     frameborder="0"
-     width="850"
-     height="500"
- ></iframe>
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_vq_diffusion.py DELETED
@@ -1,496 +0,0 @@
1
- # Copyright 2023 Microsoft and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from dataclasses import dataclass
16
- from typing import Optional, Tuple, Union
17
-
18
- import numpy as np
19
- import torch
20
- import torch.nn.functional as F
21
-
22
- from ..configuration_utils import ConfigMixin, register_to_config
23
- from ..utils import BaseOutput
24
- from .scheduling_utils import SchedulerMixin
25
-
26
-
27
- @dataclass
28
- class VQDiffusionSchedulerOutput(BaseOutput):
29
- """
30
- Output class for the scheduler's step function output.
31
-
32
- Args:
33
- prev_sample (`torch.LongTensor` of shape `(batch size, num latent pixels)`):
34
- Computed sample x_{t-1} of previous timestep. `prev_sample` should be used as next model input in the
35
- denoising loop.
36
- """
37
-
38
- prev_sample: torch.LongTensor
39
-
40
-
41
- def index_to_log_onehot(x: torch.LongTensor, num_classes: int) -> torch.FloatTensor:
42
- """
43
- Convert batch of vector of class indices into batch of log onehot vectors
44
-
45
- Args:
46
- x (`torch.LongTensor` of shape `(batch size, vector length)`):
47
- Batch of class indices
48
-
49
- num_classes (`int`):
50
- number of classes to be used for the onehot vectors
51
-
52
- Returns:
53
- `torch.FloatTensor` of shape `(batch size, num classes, vector length)`:
54
- Log onehot vectors
55
- """
56
- x_onehot = F.one_hot(x, num_classes)
57
- x_onehot = x_onehot.permute(0, 2, 1)
58
- log_x = torch.log(x_onehot.float().clamp(min=1e-30))
59
- return log_x
60
-
61
-
62
- def gumbel_noised(logits: torch.FloatTensor, generator: Optional[torch.Generator]) -> torch.FloatTensor:
63
- """
64
- Apply gumbel noise to `logits`
65
- """
66
- uniform = torch.rand(logits.shape, device=logits.device, generator=generator)
67
- gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30)
68
- noised = gumbel_noise + logits
69
- return noised
70
-
71
-
72
- def alpha_schedules(num_diffusion_timesteps: int, alpha_cum_start=0.99999, alpha_cum_end=0.000009):
73
- """
74
- Cumulative and non-cumulative alpha schedules.
75
-
76
- See section 4.1.
77
- """
78
- att = (
79
- np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (alpha_cum_end - alpha_cum_start)
80
- + alpha_cum_start
81
- )
82
- att = np.concatenate(([1], att))
83
- at = att[1:] / att[:-1]
84
- att = np.concatenate((att[1:], [1]))
85
- return at, att
86
-
87
-
88
- def gamma_schedules(num_diffusion_timesteps: int, gamma_cum_start=0.000009, gamma_cum_end=0.99999):
89
- """
90
- Cumulative and non-cumulative gamma schedules.
91
-
92
- See section 4.1.
93
- """
94
- ctt = (
95
- np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (gamma_cum_end - gamma_cum_start)
96
- + gamma_cum_start
97
- )
98
- ctt = np.concatenate(([0], ctt))
99
- one_minus_ctt = 1 - ctt
100
- one_minus_ct = one_minus_ctt[1:] / one_minus_ctt[:-1]
101
- ct = 1 - one_minus_ct
102
- ctt = np.concatenate((ctt[1:], [0]))
103
- return ct, ctt
104
-
105
-
106
- class VQDiffusionScheduler(SchedulerMixin, ConfigMixin):
107
- """
108
- The VQ-diffusion transformer outputs predicted probabilities of the initial unnoised image.
109
-
110
- The VQ-diffusion scheduler converts the transformer's output into a sample for the unnoised image at the previous
111
- diffusion timestep.
112
-
113
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
114
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
115
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
116
- [`~SchedulerMixin.from_pretrained`] functions.
117
-
118
- For more details, see the original paper: https://arxiv.org/abs/2111.14822
119
-
120
- Args:
121
- num_vec_classes (`int`):
122
- The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked
123
- latent pixel.
124
-
125
- num_train_timesteps (`int`):
126
- Number of diffusion steps used to train the model.
127
-
128
- alpha_cum_start (`float`):
129
- The starting cumulative alpha value.
130
-
131
- alpha_cum_end (`float`):
132
- The ending cumulative alpha value.
133
-
134
- gamma_cum_start (`float`):
135
- The starting cumulative gamma value.
136
-
137
- gamma_cum_end (`float`):
138
- The ending cumulative gamma value.
139
- """
140
-
141
- order = 1
142
-
143
- @register_to_config
144
- def __init__(
145
- self,
146
- num_vec_classes: int,
147
- num_train_timesteps: int = 100,
148
- alpha_cum_start: float = 0.99999,
149
- alpha_cum_end: float = 0.000009,
150
- gamma_cum_start: float = 0.000009,
151
- gamma_cum_end: float = 0.99999,
152
- ):
153
- self.num_embed = num_vec_classes
154
-
155
- # By convention, the index for the mask class is the last class index
156
- self.mask_class = self.num_embed - 1
157
-
158
- at, att = alpha_schedules(num_train_timesteps, alpha_cum_start=alpha_cum_start, alpha_cum_end=alpha_cum_end)
159
- ct, ctt = gamma_schedules(num_train_timesteps, gamma_cum_start=gamma_cum_start, gamma_cum_end=gamma_cum_end)
160
-
161
- num_non_mask_classes = self.num_embed - 1
162
- bt = (1 - at - ct) / num_non_mask_classes
163
- btt = (1 - att - ctt) / num_non_mask_classes
164
-
165
- at = torch.tensor(at.astype("float64"))
166
- bt = torch.tensor(bt.astype("float64"))
167
- ct = torch.tensor(ct.astype("float64"))
168
- log_at = torch.log(at)
169
- log_bt = torch.log(bt)
170
- log_ct = torch.log(ct)
171
-
172
- att = torch.tensor(att.astype("float64"))
173
- btt = torch.tensor(btt.astype("float64"))
174
- ctt = torch.tensor(ctt.astype("float64"))
175
- log_cumprod_at = torch.log(att)
176
- log_cumprod_bt = torch.log(btt)
177
- log_cumprod_ct = torch.log(ctt)
178
-
179
- self.log_at = log_at.float()
180
- self.log_bt = log_bt.float()
181
- self.log_ct = log_ct.float()
182
- self.log_cumprod_at = log_cumprod_at.float()
183
- self.log_cumprod_bt = log_cumprod_bt.float()
184
- self.log_cumprod_ct = log_cumprod_ct.float()
185
-
186
- # setable values
187
- self.num_inference_steps = None
188
- self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
189
-
190
- def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
191
- """
192
- Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
193
-
194
- Args:
195
- num_inference_steps (`int`):
196
- the number of diffusion steps used when generating samples with a pre-trained model.
197
-
198
- device (`str` or `torch.device`):
199
- device to place the timesteps and the diffusion process parameters (alpha, beta, gamma) on.
200
- """
201
- self.num_inference_steps = num_inference_steps
202
- timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
203
- self.timesteps = torch.from_numpy(timesteps).to(device)
204
-
205
- self.log_at = self.log_at.to(device)
206
- self.log_bt = self.log_bt.to(device)
207
- self.log_ct = self.log_ct.to(device)
208
- self.log_cumprod_at = self.log_cumprod_at.to(device)
209
- self.log_cumprod_bt = self.log_cumprod_bt.to(device)
210
- self.log_cumprod_ct = self.log_cumprod_ct.to(device)
211
-
212
- def step(
213
- self,
214
- model_output: torch.FloatTensor,
215
- timestep: torch.long,
216
- sample: torch.LongTensor,
217
- generator: Optional[torch.Generator] = None,
218
- return_dict: bool = True,
219
- ) -> Union[VQDiffusionSchedulerOutput, Tuple]:
220
- """
221
- Predict the sample at the previous timestep via the reverse transition distribution i.e. Equation (11). See the
222
- docstring for `self.q_posterior` for more in depth docs on how Equation (11) is computed.
223
-
224
- Args:
225
- log_p_x_0: (`torch.FloatTensor` of shape `(batch size, num classes - 1, num latent pixels)`):
226
- The log probabilities for the predicted classes of the initial latent pixels. Does not include a
227
- prediction for the masked class as the initial unnoised image cannot be masked.
228
-
229
- t (`torch.long`):
230
- The timestep that determines which transition matrices are used.
231
-
232
- x_t: (`torch.LongTensor` of shape `(batch size, num latent pixels)`):
233
- The classes of each latent pixel at time `t`
234
-
235
- generator: (`torch.Generator` or None):
236
- RNG for the noise applied to p(x_{t-1} | x_t) before it is sampled from.
237
-
238
- return_dict (`bool`):
239
- option for returning tuple rather than VQDiffusionSchedulerOutput class
240
-
241
- Returns:
242
- [`~schedulers.scheduling_utils.VQDiffusionSchedulerOutput`] or `tuple`:
243
- [`~schedulers.scheduling_utils.VQDiffusionSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`.
244
- When returning a tuple, the first element is the sample tensor.
245
- """
246
- if timestep == 0:
247
- log_p_x_t_min_1 = model_output
248
- else:
249
- log_p_x_t_min_1 = self.q_posterior(model_output, sample, timestep)
250
-
251
- log_p_x_t_min_1 = gumbel_noised(log_p_x_t_min_1, generator)
252
-
253
- x_t_min_1 = log_p_x_t_min_1.argmax(dim=1)
254
-
255
- if not return_dict:
256
- return (x_t_min_1,)
257
-
258
- return VQDiffusionSchedulerOutput(prev_sample=x_t_min_1)
259
-
260
- def q_posterior(self, log_p_x_0, x_t, t):
261
- """
262
- Calculates the log probabilities for the predicted classes of the image at timestep `t-1`. I.e. Equation (11).
263
-
264
- Instead of directly computing equation (11), we use Equation (5) to restate Equation (11) in terms of only
265
- forward probabilities.
266
-
267
- Equation (11) stated in terms of forward probabilities via Equation (5):
268
-
269
- Where:
270
- - the sum is over x_0 = {C_0 ... C_{k-1}} (classes for x_0)
271
-
272
- p(x_{t-1} | x_t) = sum( q(x_t | x_{t-1}) * q(x_{t-1} | x_0) * p(x_0) / q(x_t | x_0) )
273
-
274
- Args:
275
- log_p_x_0: (`torch.FloatTensor` of shape `(batch size, num classes - 1, num latent pixels)`):
276
- The log probabilities for the predicted classes of the initial latent pixels. Does not include a
277
- prediction for the masked class as the initial unnoised image cannot be masked.
278
-
279
- x_t: (`torch.LongTensor` of shape `(batch size, num latent pixels)`):
280
- The classes of each latent pixel at time `t`
281
-
282
- t (torch.Long):
283
- The timestep that determines which transition matrix is used.
284
-
285
- Returns:
286
- `torch.FloatTensor` of shape `(batch size, num classes, num latent pixels)`:
287
- The log probabilities for the predicted classes of the image at timestep `t-1`. I.e. Equation (11).
288
- """
289
- log_onehot_x_t = index_to_log_onehot(x_t, self.num_embed)
290
-
291
- log_q_x_t_given_x_0 = self.log_Q_t_transitioning_to_known_class(
292
- t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=True
293
- )
294
-
295
- log_q_t_given_x_t_min_1 = self.log_Q_t_transitioning_to_known_class(
296
- t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=False
297
- )
298
-
299
- # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0)
300
- # . . .
301
- # . . .
302
- # . . .
303
- # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1})
304
- q = log_p_x_0 - log_q_x_t_given_x_0
305
-
306
- # sum_0 = p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}), ... ,
307
- # sum_n = p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1})
308
- q_log_sum_exp = torch.logsumexp(q, dim=1, keepdim=True)
309
-
310
- # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0 ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n
311
- # . . .
312
- # . . .
313
- # . . .
314
- # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0 ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n
315
- q = q - q_log_sum_exp
316
-
317
- # (p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}
318
- # . . .
319
- # . . .
320
- # . . .
321
- # (p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}
322
- # c_cumulative_{t-1} ... c_cumulative_{t-1}
323
- q = self.apply_cumulative_transitions(q, t - 1)
324
-
325
- # ((p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_0 ... ((p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_n
326
- # . . .
327
- # . . .
328
- # . . .
329
- # ((p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_0 ... ((p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_n
330
- # c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 ... c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0
331
- log_p_x_t_min_1 = q + log_q_t_given_x_t_min_1 + q_log_sum_exp
332
-
333
- # For each column, there are two possible cases.
334
- #
335
- # Where:
336
- # - sum(p_n(x_0))) is summing over all classes for x_0
337
- # - C_i is the class transitioning from (not to be confused with c_t and c_cumulative_t being used for gamma's)
338
- # - C_j is the class transitioning to
339
- #
340
- # 1. x_t is masked i.e. x_t = c_k
341
- #
342
- # Simplifying the expression, the column vector is:
343
- # .
344
- # .
345
- # .
346
- # (c_t / c_cumulative_t) * (a_cumulative_{t-1} * p_n(x_0 = C_i | x_t) + b_cumulative_{t-1} * sum(p_n(x_0)))
347
- # .
348
- # .
349
- # .
350
- # (c_cumulative_{t-1} / c_cumulative_t) * sum(p_n(x_0))
351
- #
352
- # From equation (11) stated in terms of forward probabilities, the last row is trivially verified.
353
- #
354
- # For the other rows, we can state the equation as ...
355
- #
356
- # (c_t / c_cumulative_t) * [b_cumulative_{t-1} * p(x_0=c_0) + ... + (a_cumulative_{t-1} + b_cumulative_{t-1}) * p(x_0=C_i) + ... + b_cumulative_{k-1} * p(x_0=c_{k-1})]
357
- #
358
- # This verifies the other rows.
359
- #
360
- # 2. x_t is not masked
361
- #
362
- # Simplifying the expression, there are two cases for the rows of the column vector, where C_j = C_i and where C_j != C_i:
363
- # .
364
- # .
365
- # .
366
- # C_j != C_i: b_t * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / b_cumulative_t) * p_n(x_0 = C_i) + ... + (b_cumulative_{t-1} / (a_cumulative_t + b_cumulative_t)) * p_n(c_0=C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1}))
367
- # .
368
- # .
369
- # .
370
-         #   C_j = C_i: (a_t + b_t) * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / (a_cumulative_t + b_cumulative_t)) * p_n(x_0 = C_i = C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1}))
-         #   .
-         #   .
-         #   .
-         #   0
-         #
-         # The last row is trivially verified. The other rows can be verified by directly expanding equation (11)
-         # stated in terms of forward probabilities.
-         return log_p_x_t_min_1
-
-     def log_Q_t_transitioning_to_known_class(
-         self, *, t: torch.int, x_t: torch.LongTensor, log_onehot_x_t: torch.FloatTensor, cumulative: bool
-     ):
-         """
-         Returns the log probabilities of the rows from the (cumulative or non-cumulative) transition matrix for each
-         latent pixel in `x_t`.
-
-         See equation (7) for the complete non-cumulative transition matrix. The complete cumulative transition matrix
-         has the same structure, except that the parameters (alpha, beta, gamma) are the cumulative analogs.
-
-         Args:
-             t (`torch.long`):
-                 The timestep that determines which transition matrix is used.
-
-             x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`):
-                 The classes of each latent pixel at time `t`.
-
-             log_onehot_x_t (`torch.FloatTensor` of shape `(batch size, num classes, num latent pixels)`):
-                 The log one-hot vectors of `x_t`.
-
-             cumulative (`bool`):
-                 If cumulative is `False`, we use the single-step transition matrix `t-1`->`t`. If cumulative is
-                 `True`, we use the cumulative transition matrix `0`->`t`.
-
-         Returns:
-             `torch.FloatTensor` of shape `(batch size, num classes - 1, num latent pixels)`:
-                 Each _column_ of the returned matrix is a _row_ of log probabilities of the complete probability
-                 transition matrix.
-
-                 When non-cumulative, returns `self.num_classes - 1` rows because the initial latent pixel cannot be
-                 masked.
-
-                 Where:
-                 - `q_n` is the probability distribution for the forward process of the `n`th latent pixel.
-                 - C_0 is a class of a latent pixel embedding
-                 - C_k is the class of the masked latent pixel
-
-                 non-cumulative result (omitting logarithms):
-                 ```
-                 q_0(x_t | x_{t-1} = C_0) ... q_n(x_t | x_{t-1} = C_0)
-                           .        .                    .
-                           .               .             .
-                           .                      .      .
-                 q_0(x_t | x_{t-1} = C_k) ... q_n(x_t | x_{t-1} = C_k)
-                 ```
-
-                 cumulative result (omitting logarithms):
-                 ```
-                 q_0_cumulative(x_t | x_0 = C_0)     ... q_n_cumulative(x_t | x_0 = C_0)
-                           .            .                             .
-                           .                   .                      .
-                           .                          .               .
-                 q_0_cumulative(x_t | x_0 = C_{k-1}) ... q_n_cumulative(x_t | x_0 = C_{k-1})
-                 ```
-         """
-         if cumulative:
-             a = self.log_cumprod_at[t]
-             b = self.log_cumprod_bt[t]
-             c = self.log_cumprod_ct[t]
-         else:
-             a = self.log_at[t]
-             b = self.log_bt[t]
-             c = self.log_ct[t]
-
-         if not cumulative:
-             # The values in the onehot vector can also be used as the logprobs for transitioning
-             # from masked latent pixels. If we are not calculating the cumulative transitions,
-             # we need to save these vectors to be re-appended to the final matrix so the values
-             # aren't overwritten.
-             #
-             # `P(x_t != mask | x_{t-1} = mask) = 0` and 0 will be the value of the last row of the onehot vector
-             # if x_t is not masked
-             #
-             # `P(x_t = mask | x_{t-1} = mask) = 1` and 1 will be the value of the last row of the onehot vector
-             # if x_t is masked
-             log_onehot_x_t_transitioning_from_masked = log_onehot_x_t[:, -1, :].unsqueeze(1)
-
-             # `index_to_log_onehot` will add onehot vectors for masked pixels,
-             # so the default one hot matrix has one too many rows. See the docstring
-             # for an explanation of the dimensionality of the returned matrix.
-             log_onehot_x_t = log_onehot_x_t[:, :-1, :]
-
-         # this is a cheeky trick to produce the transition probabilities using log one-hot vectors.
-         #
-         # Don't worry about what values this sets in the columns that mark transitions
-         # to masked latent pixels. They are overwritten later with the `mask_class_mask`.
-         #
-         # Looking at the below logspace formula in non-logspace, each value will evaluate to either
-         # `1 * a + b = a + b` where `log_Q_t` has the one hot value in the column
-         # or
-         # `0 * a + b = b` where `log_Q_t` has the 0 values in the column.
-         #
-         # See equation 7 for more details.
-         log_Q_t = (log_onehot_x_t + a).logaddexp(b)
-
-         # The whole column of each masked pixel is `c`
-         mask_class_mask = x_t == self.mask_class
-         mask_class_mask = mask_class_mask.unsqueeze(1).expand(-1, self.num_embed - 1, -1)
-         log_Q_t[mask_class_mask] = c
-
-         if not cumulative:
-             log_Q_t = torch.cat((log_Q_t, log_onehot_x_t_transitioning_from_masked), dim=1)
-
-         return log_Q_t
-
-     def apply_cumulative_transitions(self, q, t):
-         bsz = q.shape[0]
-         a = self.log_cumprod_at[t]
-         b = self.log_cumprod_bt[t]
-         c = self.log_cumprod_ct[t]
-
-         num_latent_pixels = q.shape[2]
-         c = c.expand(bsz, 1, num_latent_pixels)
-
-         q = (q + a).logaddexp(b)
-         q = torch.cat((q, c), dim=1)
-
-         return q
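
The `(log_onehot_x_t + a).logaddexp(b)` trick used above is easy to sanity-check in isolation: for a one-hot vector `u` and log-space scalars, it computes `log(a * u + b)` elementwise. A minimal sketch (the scalar values are illustrative, not taken from a real noise schedule):

```python
import torch

# One row of the transition matrix, built from an (approximately) one-hot
# vector in log space. Column with the one-hot entry -> log(a + b); all
# other columns -> log(b).
log_a = torch.log(torch.tensor(0.7))
log_b = torch.log(torch.tensor(0.1))
log_onehot = torch.log(torch.tensor([1e-30, 1.0, 1e-30]))  # ~log one-hot

log_row = (log_onehot + log_a).logaddexp(log_b)
print(log_row.exp())  # tensor([0.1000, 0.8000, 0.1000]) -> b, a + b, b
```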
 
spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py DELETED
@@ -1,4 +0,0 @@
- _base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py'
- # learning policy
- lr_config = dict(step=[16, 23])
- runner = dict(type='EpochBasedRunner', max_epochs=24)
 
spaces/Andy1621/uniformer_image_detection/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py DELETED
@@ -1,124 +0,0 @@
- _base_ = '../_base_/default_runtime.py'
- # model settings
- model = dict(
-     type='YOLOV3',
-     pretrained='open-mmlab://darknet53',
-     backbone=dict(type='Darknet', depth=53, out_indices=(3, 4, 5)),
-     neck=dict(
-         type='YOLOV3Neck',
-         num_scales=3,
-         in_channels=[1024, 512, 256],
-         out_channels=[512, 256, 128]),
-     bbox_head=dict(
-         type='YOLOV3Head',
-         num_classes=80,
-         in_channels=[512, 256, 128],
-         out_channels=[1024, 512, 256],
-         anchor_generator=dict(
-             type='YOLOAnchorGenerator',
-             base_sizes=[[(116, 90), (156, 198), (373, 326)],
-                         [(30, 61), (62, 45), (59, 119)],
-                         [(10, 13), (16, 30), (33, 23)]],
-             strides=[32, 16, 8]),
-         bbox_coder=dict(type='YOLOBBoxCoder'),
-         featmap_strides=[32, 16, 8],
-         loss_cls=dict(
-             type='CrossEntropyLoss',
-             use_sigmoid=True,
-             loss_weight=1.0,
-             reduction='sum'),
-         loss_conf=dict(
-             type='CrossEntropyLoss',
-             use_sigmoid=True,
-             loss_weight=1.0,
-             reduction='sum'),
-         loss_xy=dict(
-             type='CrossEntropyLoss',
-             use_sigmoid=True,
-             loss_weight=2.0,
-             reduction='sum'),
-         loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
-     # training and testing settings
-     train_cfg=dict(
-         assigner=dict(
-             type='GridAssigner',
-             pos_iou_thr=0.5,
-             neg_iou_thr=0.5,
-             min_pos_iou=0)),
-     test_cfg=dict(
-         nms_pre=1000,
-         min_bbox_size=0,
-         score_thr=0.05,
-         conf_thr=0.005,
-         nms=dict(type='nms', iou_threshold=0.45),
-         max_per_img=100))
- # dataset settings
- dataset_type = 'CocoDataset'
- data_root = 'data/coco/'
- img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
- train_pipeline = [
-     dict(type='LoadImageFromFile', to_float32=True),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(type='PhotoMetricDistortion'),
-     dict(
-         type='Expand',
-         mean=img_norm_cfg['mean'],
-         to_rgb=img_norm_cfg['to_rgb'],
-         ratio_range=(1, 2)),
-     dict(
-         type='MinIoURandomCrop',
-         min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
-         min_crop_size=0.3),
-     dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(608, 608),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img'])
-         ])
- ]
- data = dict(
-     samples_per_gpu=8,
-     workers_per_gpu=4,
-     train=dict(
-         type=dataset_type,
-         ann_file=data_root + 'annotations/instances_train2017.json',
-         img_prefix=data_root + 'train2017/',
-         pipeline=train_pipeline),
-     val=dict(
-         type=dataset_type,
-         ann_file=data_root + 'annotations/instances_val2017.json',
-         img_prefix=data_root + 'val2017/',
-         pipeline=test_pipeline),
-     test=dict(
-         type=dataset_type,
-         ann_file=data_root + 'annotations/instances_val2017.json',
-         img_prefix=data_root + 'val2017/',
-         pipeline=test_pipeline))
- # optimizer
- optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005)
- optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
- # learning policy
- lr_config = dict(
-     policy='step',
-     warmup='linear',
-     warmup_iters=2000,  # same as burn-in in darknet
-     warmup_ratio=0.1,
-     step=[218, 246])
- # runtime settings
- runner = dict(type='EpochBasedRunner', max_epochs=273)
- evaluation = dict(interval=1, metric=['bbox'])
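
For context, a config like this is consumed through MMCV's `Config` loader; a minimal sketch (mmcv 1.x API; the path is illustrative and assumes this repo's layout):

```python
from mmcv import Config

# Load the declarative config into a dict-like object with attribute access.
cfg = Config.fromfile('configs/yolo/yolov3_d53_mstrain-608_273e_coco.py')
print(cfg.model.bbox_head.num_classes)  # 80 (COCO)
print(cfg.runner.max_epochs)            # 273

# Configs can be tweaked in memory before building the detector/runner:
cfg.optimizer.lr = 0.0005
```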
 
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/slio.py DELETED
@@ -1,177 +0,0 @@
- # ==========================================================
- # Modified from mmcv
- # ==========================================================
-
- import json
- import pickle
- from abc import ABCMeta, abstractmethod
- from pathlib import Path
-
- import yaml
-
- try:
-     from yaml import CLoader as Loader, CDumper as Dumper
- except ImportError:
-     from yaml import Loader, Dumper
-
-
- # ===========================
- # Register handler
- # ===========================
-
-
- class BaseFileHandler(metaclass=ABCMeta):
-     @abstractmethod
-     def load_from_fileobj(self, file, **kwargs):
-         pass
-
-     @abstractmethod
-     def dump_to_fileobj(self, obj, file, **kwargs):
-         pass
-
-     @abstractmethod
-     def dump_to_str(self, obj, **kwargs):
-         pass
-
-     def load_from_path(self, filepath, mode="r", **kwargs):
-         with open(filepath, mode) as f:
-             return self.load_from_fileobj(f, **kwargs)
-
-     def dump_to_path(self, obj, filepath, mode="w", **kwargs):
-         with open(filepath, mode) as f:
-             self.dump_to_fileobj(obj, f, **kwargs)
-
-
- class JsonHandler(BaseFileHandler):
-     def load_from_fileobj(self, file):
-         return json.load(file)
-
-     def dump_to_fileobj(self, obj, file, **kwargs):
-         json.dump(obj, file, **kwargs)
-
-     def dump_to_str(self, obj, **kwargs):
-         return json.dumps(obj, **kwargs)
-
-
- class PickleHandler(BaseFileHandler):
-     def load_from_fileobj(self, file, **kwargs):
-         return pickle.load(file, **kwargs)
-
-     def load_from_path(self, filepath, **kwargs):
-         return super(PickleHandler, self).load_from_path(filepath, mode="rb", **kwargs)
-
-     def dump_to_str(self, obj, **kwargs):
-         kwargs.setdefault("protocol", 2)
-         return pickle.dumps(obj, **kwargs)
-
-     def dump_to_fileobj(self, obj, file, **kwargs):
-         kwargs.setdefault("protocol", 2)
-         pickle.dump(obj, file, **kwargs)
-
-     def dump_to_path(self, obj, filepath, **kwargs):
-         super(PickleHandler, self).dump_to_path(obj, filepath, mode="wb", **kwargs)
-
-
- class YamlHandler(BaseFileHandler):
-     def load_from_fileobj(self, file, **kwargs):
-         kwargs.setdefault("Loader", Loader)
-         return yaml.load(file, **kwargs)
-
-     def dump_to_fileobj(self, obj, file, **kwargs):
-         kwargs.setdefault("Dumper", Dumper)
-         yaml.dump(obj, file, **kwargs)
-
-     def dump_to_str(self, obj, **kwargs):
-         kwargs.setdefault("Dumper", Dumper)
-         return yaml.dump(obj, **kwargs)
-
-
- file_handlers = {
-     "json": JsonHandler(),
-     "yaml": YamlHandler(),
-     "yml": YamlHandler(),
-     "pickle": PickleHandler(),
-     "pkl": PickleHandler(),
- }
-
- # ===========================
- # load and dump
- # ===========================
-
-
- def is_str(x):
-     """Whether the input is a string instance.
-
-     Note: This method is deprecated since python 2 is no longer supported.
-     """
-     return isinstance(x, str)
-
-
- def slload(file, file_format=None, **kwargs):
-     """Load data from json/yaml/pickle files.
-
-     This method provides a unified api for loading data from serialized files.
-
-     Args:
-         file (str or :obj:`Path` or file-like object): Filename or a file-like
-             object.
-         file_format (str, optional): If not specified, the file format will be
-             inferred from the file extension, otherwise use the specified one.
-             Currently supported formats include "json", "yaml/yml" and
-             "pickle/pkl".
-
-     Returns:
-         The content from the file.
-     """
-     if isinstance(file, Path):
-         file = str(file)
-     if file_format is None and is_str(file):
-         file_format = file.split(".")[-1]
-     if file_format not in file_handlers:
-         raise TypeError(f"Unsupported format: {file_format}")
-
-     handler = file_handlers[file_format]
-     if is_str(file):
-         obj = handler.load_from_path(file, **kwargs)
-     elif hasattr(file, "read"):
-         obj = handler.load_from_fileobj(file, **kwargs)
-     else:
-         raise TypeError('"file" must be a filepath str or a file-object')
-     return obj
-
-
- def sldump(obj, file=None, file_format=None, **kwargs):
-     """Dump data to json/yaml/pickle strings or files.
-
-     This method provides a unified api for dumping data as strings or to files,
-     and also supports custom arguments for each file format.
-
-     Args:
-         obj (any): The python object to be dumped.
-         file (str or :obj:`Path` or file-like object, optional): If not
-             specified, then the object is dumped to a str, otherwise to a file
-             specified by the filename or file-like object.
-         file_format (str, optional): Same as :func:`load`.
-
-     Returns:
-         bool: True for success, False otherwise.
-     """
-     if isinstance(file, Path):
-         file = str(file)
-     if file_format is None:
-         if is_str(file):
-             file_format = file.split(".")[-1]
-         elif file is None:
-             raise ValueError("file_format must be specified since file is None")
-     if file_format not in file_handlers:
-         raise TypeError(f"Unsupported format: {file_format}")
-
-     handler = file_handlers[file_format]
-     if file is None:
-         return handler.dump_to_str(obj, **kwargs)
-     elif is_str(file):
-         handler.dump_to_path(obj, file, **kwargs)
-     elif hasattr(file, "write"):
-         handler.dump_to_fileobj(obj, file, **kwargs)
-     else:
-         raise TypeError('"file" must be a filename str or a file-object')
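
The `slload`/`sldump` helpers above dispatch on the file extension; a minimal round trip (file name and sample data are illustrative):

```python
# JSON round trip: extra kwargs are forwarded to json.dump / json.load.
data = {"boxes": [[0, 0, 10, 10]], "label": "cat"}
sldump(data, "annotations.json", indent=2)
assert slload("annotations.json") == data

# With no file argument, sldump returns a string instead of writing a file.
print(sldump(data, file_format="yaml"))
```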
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/lazy_wheel.py DELETED
@@ -1,210 +0,0 @@
- """Lazy ZIP over HTTP"""
-
- __all__ = ["HTTPRangeRequestUnsupported", "dist_from_wheel_url"]
-
- from bisect import bisect_left, bisect_right
- from contextlib import contextmanager
- from tempfile import NamedTemporaryFile
- from typing import Any, Dict, Generator, List, Optional, Tuple
- from zipfile import BadZipFile, ZipFile
-
- from pip._vendor.packaging.utils import canonicalize_name
- from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
-
- from pip._internal.metadata import BaseDistribution, MemoryWheel, get_wheel_distribution
- from pip._internal.network.session import PipSession
- from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks
-
-
- class HTTPRangeRequestUnsupported(Exception):
-     pass
-
-
- def dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution:
-     """Return a distribution object from the given wheel URL.
-
-     This uses HTTP range requests to only fetch the portion of the wheel
-     containing metadata, just enough for the object to be constructed.
-     If such requests are not supported, HTTPRangeRequestUnsupported
-     is raised.
-     """
-     with LazyZipOverHTTP(url, session) as zf:
-         # For read-only ZIP files, ZipFile only needs methods read,
-         # seek, seekable and tell, not the whole IO protocol.
-         wheel = MemoryWheel(zf.name, zf)  # type: ignore
-         # After context manager exit, wheel.name
-         # is an invalid file by intention.
-         return get_wheel_distribution(wheel, canonicalize_name(name))
-
-
- class LazyZipOverHTTP:
-     """File-like object mapped to a ZIP file over HTTP.
-
-     This uses HTTP range requests to lazily fetch the file's content,
-     which is supposed to be fed to ZipFile. If such requests are not
-     supported by the server, raise HTTPRangeRequestUnsupported
-     during initialization.
-     """
-
-     def __init__(
-         self, url: str, session: PipSession, chunk_size: int = CONTENT_CHUNK_SIZE
-     ) -> None:
-         head = session.head(url, headers=HEADERS)
-         raise_for_status(head)
-         assert head.status_code == 200
-         self._session, self._url, self._chunk_size = session, url, chunk_size
-         self._length = int(head.headers["Content-Length"])
-         self._file = NamedTemporaryFile()
-         self.truncate(self._length)
-         self._left: List[int] = []
-         self._right: List[int] = []
-         if "bytes" not in head.headers.get("Accept-Ranges", "none"):
-             raise HTTPRangeRequestUnsupported("range request is not supported")
-         self._check_zip()
-
-     @property
-     def mode(self) -> str:
-         """Opening mode, which is always rb."""
-         return "rb"
-
-     @property
-     def name(self) -> str:
-         """Path to the underlying file."""
-         return self._file.name
-
-     def seekable(self) -> bool:
-         """Return whether random access is supported, which is True."""
-         return True
-
-     def close(self) -> None:
-         """Close the file."""
-         self._file.close()
-
-     @property
-     def closed(self) -> bool:
-         """Whether the file is closed."""
-         return self._file.closed
-
-     def read(self, size: int = -1) -> bytes:
-         """Read up to size bytes from the object and return them.
-
-         As a convenience, if size is unspecified or -1,
-         all bytes until EOF are returned. Fewer than
-         size bytes may be returned if EOF is reached.
-         """
-         download_size = max(size, self._chunk_size)
-         start, length = self.tell(), self._length
-         stop = length if size < 0 else min(start + download_size, length)
-         start = max(0, stop - download_size)
-         self._download(start, stop - 1)
-         return self._file.read(size)
-
-     def readable(self) -> bool:
-         """Return whether the file is readable, which is True."""
-         return True
-
-     def seek(self, offset: int, whence: int = 0) -> int:
-         """Change stream position and return the new absolute position.
-
-         Seek to offset relative position indicated by whence:
-         * 0: Start of stream (the default). pos should be >= 0;
-         * 1: Current position - pos may be negative;
-         * 2: End of stream - pos usually negative.
-         """
-         return self._file.seek(offset, whence)
-
-     def tell(self) -> int:
-         """Return the current position."""
-         return self._file.tell()
-
-     def truncate(self, size: Optional[int] = None) -> int:
-         """Resize the stream to the given size in bytes.
-
-         If size is unspecified resize to the current position.
-         The current stream position isn't changed.
-
-         Return the new file size.
-         """
-         return self._file.truncate(size)
-
-     def writable(self) -> bool:
-         """Return False."""
-         return False
-
-     def __enter__(self) -> "LazyZipOverHTTP":
-         self._file.__enter__()
-         return self
-
-     def __exit__(self, *exc: Any) -> None:
-         self._file.__exit__(*exc)
-
-     @contextmanager
-     def _stay(self) -> Generator[None, None, None]:
-         """Return a context manager keeping the position.
-
-         At the end of the block, seek back to original position.
-         """
-         pos = self.tell()
-         try:
-             yield
-         finally:
-             self.seek(pos)
-
-     def _check_zip(self) -> None:
-         """Check and download until the file is a valid ZIP."""
-         end = self._length - 1
-         for start in reversed(range(0, end, self._chunk_size)):
-             self._download(start, end)
-             with self._stay():
-                 try:
-                     # For read-only ZIP files, ZipFile only needs
-                     # methods read, seek, seekable and tell.
-                     ZipFile(self)  # type: ignore
-                 except BadZipFile:
-                     pass
-                 else:
-                     break
-
-     def _stream_response(
-         self, start: int, end: int, base_headers: Dict[str, str] = HEADERS
-     ) -> Response:
-         """Return HTTP response to a range request from start to end."""
-         headers = base_headers.copy()
-         headers["Range"] = f"bytes={start}-{end}"
-         # TODO: Get range requests to be correctly cached
-         headers["Cache-Control"] = "no-cache"
-         return self._session.get(self._url, headers=headers, stream=True)
-
-     def _merge(
-         self, start: int, end: int, left: int, right: int
-     ) -> Generator[Tuple[int, int], None, None]:
-         """Return a generator of intervals to be fetched.
-
-         Args:
-             start (int): Start of needed interval
-             end (int): End of needed interval
-             left (int): Index of first overlapping downloaded data
-             right (int): Index after last overlapping downloaded data
-         """
-         lslice, rslice = self._left[left:right], self._right[left:right]
-         i = start = min([start] + lslice[:1])
-         end = max([end] + rslice[-1:])
-         for j, k in zip(lslice, rslice):
-             if j > i:
-                 yield i, j - 1
-             i = k + 1
-         if i <= end:
-             yield i, end
-         self._left[left:right], self._right[left:right] = [start], [end]
-
-     def _download(self, start: int, end: int) -> None:
-         """Download bytes from start to end inclusively."""
-         with self._stay():
-             left = bisect_left(self._right, start)
-             right = bisect_right(self._left, end)
-             for start, end in self._merge(start, end, left, right):
-                 response = self._stream_response(start, end)
-                 response.raise_for_status()
-                 self.seek(start)
-                 for chunk in response_chunks(response, self._chunk_size):
-                     self._file.write(chunk)
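
The whole class hinges on HTTP range requests. The underlying mechanism can be illustrated standalone with plain `requests` (the wheel URL is a placeholder, not taken from the code above):

```python
import requests

url = "https://example.com/somepackage-1.0-py3-none-any.whl"
head = requests.head(url)
if "bytes" in head.headers.get("Accept-Ranges", "none"):
    # Ask only for the last 8 KiB, where a ZIP's central directory lives;
    # this is enough for ZipFile to list the archive without downloading it.
    tail = requests.get(url, headers={"Range": "bytes=-8192"})
    print(tail.status_code)   # 206 Partial Content if ranges are honored
    print(len(tail.content))  # <= 8192
```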
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/codingstatemachinedict.py DELETED
@@ -1,19 +0,0 @@
- from typing import TYPE_CHECKING, Tuple
-
- if TYPE_CHECKING:
-     # TypedDict was introduced in Python 3.8.
-     #
-     # TODO: Remove the else block and TYPE_CHECKING check when dropping support
-     # for Python 3.7.
-     from typing import TypedDict
-
-     class CodingStateMachineDict(TypedDict, total=False):
-         class_table: Tuple[int, ...]
-         class_factor: int
-         state_table: Tuple[int, ...]
-         char_len_table: Tuple[int, ...]
-         name: str
-         language: str  # Optional key
-
- else:
-     CodingStateMachineDict = dict
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/spinner.py DELETED
@@ -1,137 +0,0 @@
- from typing import cast, List, Optional, TYPE_CHECKING, Union
-
- from ._spinners import SPINNERS
- from .measure import Measurement
- from .table import Table
- from .text import Text
-
- if TYPE_CHECKING:
-     from .console import Console, ConsoleOptions, RenderResult, RenderableType
-     from .style import StyleType
-
-
- class Spinner:
-     """A spinner animation.
-
-     Args:
-         name (str): Name of spinner (run python -m rich.spinner).
-         text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
-         style (StyleType, optional): Style for spinner animation. Defaults to None.
-         speed (float, optional): Speed factor for animation. Defaults to 1.0.
-
-     Raises:
-         KeyError: If name isn't one of the supported spinner animations.
-     """
-
-     def __init__(
-         self,
-         name: str,
-         text: "RenderableType" = "",
-         *,
-         style: Optional["StyleType"] = None,
-         speed: float = 1.0,
-     ) -> None:
-         try:
-             spinner = SPINNERS[name]
-         except KeyError:
-             raise KeyError(f"no spinner called {name!r}")
-         self.text: "Union[RenderableType, Text]" = (
-             Text.from_markup(text) if isinstance(text, str) else text
-         )
-         self.frames = cast(List[str], spinner["frames"])[:]
-         self.interval = cast(float, spinner["interval"])
-         self.start_time: Optional[float] = None
-         self.style = style
-         self.speed = speed
-         self.frame_no_offset: float = 0.0
-         self._update_speed = 0.0
-
-     def __rich_console__(
-         self, console: "Console", options: "ConsoleOptions"
-     ) -> "RenderResult":
-         yield self.render(console.get_time())
-
-     def __rich_measure__(
-         self, console: "Console", options: "ConsoleOptions"
-     ) -> Measurement:
-         text = self.render(0)
-         return Measurement.get(console, options, text)
-
-     def render(self, time: float) -> "RenderableType":
-         """Render the spinner for a given time.
-
-         Args:
-             time (float): Time in seconds.
-
-         Returns:
-             RenderableType: A renderable containing animation frame.
-         """
-         if self.start_time is None:
-             self.start_time = time
-
-         frame_no = ((time - self.start_time) * self.speed) / (
-             self.interval / 1000.0
-         ) + self.frame_no_offset
-         frame = Text(
-             self.frames[int(frame_no) % len(self.frames)], style=self.style or ""
-         )
-
-         if self._update_speed:
-             self.frame_no_offset = frame_no
-             self.start_time = time
-             self.speed = self._update_speed
-             self._update_speed = 0.0
-
-         if not self.text:
-             return frame
-         elif isinstance(self.text, (str, Text)):
-             return Text.assemble(frame, " ", self.text)
-         else:
-             table = Table.grid(padding=1)
-             table.add_row(frame, self.text)
-             return table
-
-     def update(
-         self,
-         *,
-         text: "RenderableType" = "",
-         style: Optional["StyleType"] = None,
-         speed: Optional[float] = None,
-     ) -> None:
-         """Updates attributes of a spinner after it has been started.
-
-         Args:
-             text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
-             style (StyleType, optional): Style for spinner animation. Defaults to None.
-             speed (float, optional): Speed factor for animation. Defaults to None.
-         """
-         if text:
-             self.text = Text.from_markup(text) if isinstance(text, str) else text
-         if style:
-             self.style = style
-         if speed:
-             self._update_speed = speed
-
-
- if __name__ == "__main__":  # pragma: no cover
-     from time import sleep
-
-     from .columns import Columns
-     from .panel import Panel
-     from .live import Live
-
-     all_spinners = Columns(
-         [
-             Spinner(spinner_name, text=Text(repr(spinner_name), style="green"))
-             for spinner_name in sorted(SPINNERS.keys())
-         ],
-         column_first=True,
-         expand=True,
-     )
-
-     with Live(
-         Panel(all_spinners, title="Spinners", border_style="blue"),
-         refresh_per_second=20,
-     ) as live:
-         while True:
-             sleep(0.1)
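
For everyday use, rich drives this `Spinner` through `Console.status`, which is the public entry point; a minimal sketch:

```python
from time import sleep

from rich.console import Console

console = Console()
with console.status("Downloading...", spinner="dots") as status:
    sleep(2)
    # Spinner and text can be swapped while the animation is running.
    status.update(status="Unpacking...", spinner="line")
    sleep(2)
```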
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/debug.py DELETED
@@ -1,5 +0,0 @@
- import os
-
- # If DISTUTILS_DEBUG is anything other than the empty string, we run in
- # debug mode.
- DEBUG = os.environ.get('DISTUTILS_DEBUG')
 
spaces/Atualli/mediapipe-pose-estimation/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Mediapipe Pose Estimation
- emoji: 👁
- colorFrom: blue
- colorTo: gray
- sdk: gradio
- sdk_version: 3.36.1
- app_file: app.py
- pinned: false
- duplicated_from: hysts/mediapipe-pose-estimation
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py DELETED
@@ -1,14 +0,0 @@
- from .mask_rcnn_R_50_FPN_100ep_LSJ import (
-     dataloader,
-     lr_multiplier,
-     model,
-     optimizer,
-     train,
- )
-
- train.max_iter *= 4  # 100ep -> 400ep
-
- lr_multiplier.scheduler.milestones = [
-     milestone * 4 for milestone in lr_multiplier.scheduler.milestones
- ]
- lr_multiplier.scheduler.num_updates = train.max_iter
 
spaces/Babelscape/mrebel-demo/app.py DELETED
@@ -1,123 +0,0 @@
- import streamlit as st
- from datasets import load_dataset
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
- from time import time
- import torch
-
-
- def load_tok_and_data(lan):
-     st_time = time()
-     tokenizer = AutoTokenizer.from_pretrained("Babelscape/mrebel-large", tgt_lang="tp_XX")
-     tokenizer._src_lang = _Tokens[lan]
-     tokenizer.cur_lang_code_id = tokenizer.convert_tokens_to_ids(_Tokens[lan])
-     tokenizer.set_src_lang_special_tokens(_Tokens[lan])
-     dataset = load_dataset('Babelscape/SREDFM', lan, split="test", streaming=True)
-     dataset = [example for example in dataset.take(1001)]
-     return (tokenizer, dataset)
-
- @st.cache_resource
- def load_model():
-     st_time = time()
-     print("+++++ loading Model", time() - st_time)
-     model = AutoModelForSeq2SeqLM.from_pretrained("Babelscape/mrebel-large")
-     if torch.cuda.is_available():
-         _ = model.to("cuda:0")  # comment if no GPU available
-     _ = model.eval()
-     print("+++++ loaded model", time() - st_time)
-     return model
-
- def extract_triplets_typed(text):
-     triplets = []
-     relation = ''
-     text = text.strip()
-     current = 'x'
-     subject, relation, object_, object_type, subject_type = '', '', '', '', ''
-
-     for token in text.replace("<s>", "").replace("<pad>", "").replace("</s>", "").replace("tp_XX", "").replace("__en__", "").split():
-         if token == "<triplet>" or token == "<relation>":
-             current = 't'
-             if relation != '':
-                 triplets.append({'head': subject.strip(), 'head_type': subject_type, 'type': relation.strip(), 'tail': object_.strip(), 'tail_type': object_type})
-                 relation = ''
-             subject = ''
-         elif token.startswith("<") and token.endswith(">"):
-             if current == 't' or current == 'o':
-                 current = 's'
-                 if relation != '':
-                     triplets.append({'head': subject.strip(), 'head_type': subject_type, 'type': relation.strip(), 'tail': object_.strip(), 'tail_type': object_type})
-                 object_ = ''
-                 subject_type = token[1:-1]
-             else:
-                 current = 'o'
-                 object_type = token[1:-1]
-                 relation = ''
-         else:
-             if current == 't':
-                 subject += ' ' + token
-             elif current == 's':
-                 object_ += ' ' + token
-             elif current == 'o':
-                 relation += ' ' + token
-     if subject != '' and relation != '' and object_ != '' and object_type != '' and subject_type != '':
-         triplets.append({'head': subject.strip(), 'head_type': subject_type, 'type': relation.strip(), 'tail': object_.strip(), 'tail_type': object_type})
-     return triplets
-
- st.markdown("""This is a demo for the ACL 2023 paper [RED$^{FM}$: a Filtered and Multilingual Relation Extraction Dataset](https://arxiv.org/abs/2306.09802). The pre-trained model is able to extract triplets for up to 400 relation types from Wikidata or be used in downstream Relation Extraction task by fine-tuning. Find the model card [here](https://huggingface.co/Babelscape/mrebel-large). Read more about it in the [paper](https://arxiv.org/abs/2306.09802) and in the original [repository](https://github.com/Babelscape/rebel#REDFM).""")
-
- model = load_model()
-
- lan = st.selectbox(
-     'Select a Language',
-     ('ar', 'ca', 'de', 'el', 'en', 'es', 'fr', 'hi', 'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ru', 'sv', 'vi', 'zh'), index=1)
-
- _Tokens = {'en': 'en_XX', 'de': 'de_DE', 'ca': 'ca_XX', 'ar': 'ar_AR', 'el': 'el_EL', 'es': 'es_XX', 'it': 'it_IT', 'ja': 'ja_XX', 'ko': 'ko_KR', 'hi': 'hi_IN', 'pt': 'pt_XX', 'ru': 'ru_RU', 'pl': 'pl_PL', 'zh': 'zh_CN', 'fr': 'fr_XX', 'vi': 'vi_VN', 'sv': 'sv_SE'}
-
- tokenizer, dataset = load_tok_and_data(lan)
-
- agree = st.checkbox('Free input', False)
- if agree:
-     text = st.text_input('Input text (current example in catalan)', 'Els Red Hot Chili Peppers es van formar a Los Angeles per Kiedis, Flea, el guitarrista Hillel Slovak i el bateria Jack Irons.')
-     print(text)
- else:
-     dataset_example = st.slider('dataset id', 0, 1000, 0)
-     text = dataset[dataset_example]['text']
- length_penalty = st.slider('length_penalty', 0, 10, 1)
- num_beams = st.slider('num_beams', 1, 20, 3)
- num_return_sequences = st.slider('num_return_sequences', 1, num_beams, 2)
-
- gen_kwargs = {
-     "max_length": 256,
-     "length_penalty": length_penalty,
-     "num_beams": num_beams,
-     "num_return_sequences": num_return_sequences,
-     "forced_bos_token_id": None,
- }
-
- model_inputs = tokenizer(text, max_length=256, padding=True, truncation=True, return_tensors='pt')
- generated_tokens = model.generate(
-     model_inputs["input_ids"].to(model.device),
-     attention_mask=model_inputs["attention_mask"].to(model.device),
-     decoder_start_token_id=tokenizer.convert_tokens_to_ids("tp_XX"),
-     **gen_kwargs,
- )
-
- decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=False)
- st.title('Input text')
-
- st.write(text)
-
- if not agree:
-     st.title('Silver output')
-     entities = dataset[dataset_example]['entities']
-     relations = []
-     for trip in dataset[dataset_example]['relations']:
-         relations.append({'subject': entities[trip['subject']], 'predicate': trip['predicate'], 'object': entities[trip['object']]})
-     st.write(relations)
-
- st.title('Prediction text')
- decoded_preds = [text.replace('<s>', '').replace('</s>', '').replace('<pad>', '') for text in decoded_preds]
- st.write(decoded_preds)
-
- for idx, sentence in enumerate(decoded_preds):
-     st.title(f'Prediction triplets sentence {idx}')
-     st.write(extract_triplets_typed(sentence))
 
spaces/Bambicita/rvc-models/app.py DELETED
@@ -1,188 +0,0 @@
- import os
- import json
- import argparse
- import traceback
- import logging
- import gradio as gr
- import numpy as np
- import librosa
- import torch
- import asyncio
- import edge_tts
- from datetime import datetime
- from fairseq import checkpoint_utils
- from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
- from vc_infer_pipeline import VC
- from config import (
-     is_half,
-     device
- )
- logging.getLogger("numba").setLevel(logging.WARNING)
- limitation = os.getenv("SYSTEM") == "spaces"  # limit audio length in huggingface spaces
-
- def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index, file_big_npy):
-     def vc_fn(
-         input_audio,
-         f0_up_key,
-         f0_method,
-         index_rate,
-         tts_mode,
-         tts_text,
-         tts_voice
-     ):
-         try:
-             if tts_mode:
-                 if len(tts_text) > 100 and limitation:
-                     return "Text is too long", None
-                 if tts_text is None or tts_voice is None:
-                     return "You need to enter text and select a voice", None
-                 asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
-                 audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
-             else:
-                 if args.files:
-                     audio, sr = librosa.load(input_audio, sr=16000, mono=True)
-                 else:
-                     if input_audio is None:
-                         return "You need to upload an audio", None
-                     sampling_rate, audio = input_audio
-                     duration = audio.shape[0] / sampling_rate
-                     if duration > 20 and limitation:
-                         return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
-                     audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-                     if len(audio.shape) > 1:
-                         audio = librosa.to_mono(audio.transpose(1, 0))
-                     if sampling_rate != 16000:
-                         audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-             times = [0, 0, 0]
-             f0_up_key = int(f0_up_key)
-             audio_opt = vc.pipeline(
-                 hubert_model,
-                 net_g,
-                 0,
-                 audio,
-                 times,
-                 f0_up_key,
-                 f0_method,
-                 file_index,
-                 file_big_npy,
-                 index_rate,
-                 if_f0,
-             )
-             print(
-                 f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
-             )
-             return "Success", (tgt_sr, audio_opt)
-         except:
-             info = traceback.format_exc()
-             print(info)
-             return info, (None, None)
-     return vc_fn
-
- def load_hubert():
-     global hubert_model
-     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
-         ["hubert_base.pt"],
-         suffix="",
-     )
-     hubert_model = models[0]
-     hubert_model = hubert_model.to(device)
-     if is_half:
-         hubert_model = hubert_model.half()
-     else:
-         hubert_model = hubert_model.float()
-     hubert_model.eval()
-
- def change_to_tts_mode(tts_mode):
-     if tts_mode:
-         return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
-     else:
-         return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
-
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--api', action="store_true", default=False)
-     parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
-     parser.add_argument("--files", action="store_true", default=False, help="load audio from path")
-     args, unknown = parser.parse_known_args()
-     load_hubert()
-     models = []
-     tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
-     voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
-     with open("weights/model_info.json", "r", encoding="utf-8") as f:
-         models_info = json.load(f)
-     for name, info in models_info.items():
-         if not info['enable']:
-             continue
-         title = info['title']
-         author = info.get("author", None)
-         cover = f"weights/{name}/{info['cover']}"
-         index = f"weights/{name}/{info['feature_retrieval_library']}"
-         npy = f"weights/{name}/{info['feature_file']}"
-         cpt = torch.load(f"weights/{name}/{name}.pth", map_location="cpu")
-         tgt_sr = cpt["config"][-1]
-         cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-         if_f0 = cpt.get("f0", 1)
-         if if_f0 == 1:
-             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
-         else:
-             net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-         del net_g.enc_q
-         print(net_g.load_state_dict(cpt["weight"], strict=False))  # the state dict is not cleaned up properly without this line -- oddly enough
-         net_g.eval().to(device)
-         if is_half:
-             net_g = net_g.half()
-         else:
-             net_g = net_g.float()
-         vc = VC(tgt_sr, device, is_half)
-         models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index, npy)))
-     with gr.Blocks() as app:
-         gr.Markdown(
-             "# <center> RVC Models (Outdated)\n"
-             "## <center> The input audio should be clean and pure voice without background music.\n"
-             "### <center> Updated Repository: [NEW RVC Models](https://huggingface.co/spaces/ArkanDash/rvc-models-new).\n"
-             "#### <center> Recommended to use the Google Colab version for more feature.\n"
-             "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=ArkanDash.Rvc-Models)\n\n"
-             "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1hx6kKvIuv5XNY1Gai2PEuZhpO5z6xpVh?usp=sharing)\n\n"
-             "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
-         )
-         with gr.Tabs():
-             for (name, title, author, cover, vc_fn) in models:
-                 with gr.TabItem(name):
-                     with gr.Row():
-                         gr.Markdown(
-                             '<div align="center">'
-                             f'<div>{title}</div>\n'+
-                             (f'<div>Model author: {author}</div>' if author else "")+
-                             (f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "")+
-                             '</div>'
-                         )
-                     with gr.Row():
-                         with gr.Column():
-                             if args.files:
-                                 vc_input = gr.Textbox(label="Input audio path")
-                             else:
-                                 vc_input = gr.Audio(label="Input audio"+' (less than 20 seconds)' if limitation else '')
-                             vc_transpose = gr.Number(label="Transpose", value=0)
-                             vc_f0method = gr.Radio(
-                                 label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies",
-                                 choices=["pm", "harvest"],
-                                 value="pm",
-                                 interactive=True,
-                             )
-                             vc_index_ratio = gr.Slider(
-                                 minimum=0,
-                                 maximum=1,
-                                 label="Retrieval feature ratio",
-                                 value=0.6,
-                                 interactive=True,
-                             )
-                             tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
-                             tts_text = gr.Textbox(visible=False, label="TTS text (100 words limitation)" if limitation else "TTS text")
-                             tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
-                             vc_submit = gr.Button("Generate", variant="primary")
-                         with gr.Column():
-                             vc_output1 = gr.Textbox(label="Output Message")
-                             vc_output2 = gr.Audio(label="Output Audio")
-                 vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2])
-                 tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice])
-         app.queue(concurrency_count=1, max_size=20, api_open=args.api).launch(share=args.share)
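
The edge-tts call buried in `vc_fn` can be exercised on its own. A minimal sketch, using the same voice family as the app's default (output filename is illustrative):

```python
import asyncio

import edge_tts


async def synth() -> None:
    # Communicate takes the text and a voice short name; save() is a coroutine.
    await edge_tts.Communicate("Hello from Edge TTS", "en-US-AnaNeural").save("tts.mp3")


asyncio.run(synth())
```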
 
spaces/Benson/text-generation/Examples/Descargar Gratis Brawl Stars.md DELETED
@@ -1,71 +0,0 @@
- <br />
- <h1>Download the Play Together Hack: How to Get Unlimited Money, Diamonds, and Gems in the Popular Social Game</h1>
- <p>Do you love playing Play Together, the social game where you can meet friends from all over the world, play mini-games, decorate your house, dress up your character, and raise pets? If so, you may be wondering how to get more money, diamonds, and gems in the game without spending real money. Well, you are in luck, because in this article we will show you how to download the Play Together hack and get unlimited resources in your account. Keep reading to find out more!</p>
- <h2>free download brawl stars</h2><br /><p><b><b>Download</b> ->->->-> <a href="https://bltlly.com/2v6LOZ">https://bltlly.com/2v6LOZ</a></b></p><br /><br />
- <h2>What is Play Together?</h2>
- <p>Play Together is a virtual world where you can chat with friends, have fun, and express yourself. You can do all kinds of things in the game, such as:</p>
- <h3>A virtual world where you can meet friends from all over the world</h3>
- <p>You can join a server with players from different countries and regions and communicate with them using text or voice chat. You can also invite your friends to your house for a party or to play mini-games together. The more, the merrier!</p>
- <h3>A variety of mini-games, activities, and customization options</h3>
- <p>You can choose from many different mini-games to play with your friends or other players online. You can race to the finish line, face a swarm of zombies, shoot it out in a battle royale, and more. You can also explore different places in the game world, such as the beach, the campsite, the amusement park, and the city. You can also customize your character in many ways and express your personality. You can change your clothes, hairstyle, accessories, facial expressions, and poses. You can also decorate your house with all kinds of furniture and items that suit your taste.</p>
- <h3>A free game with in-app purchases</h3>
-
- <h2>Why would you need the Play Together hack?</h2>
- <p>While Play Together is a fun and entertaining game, it can also be frustrating if you do not have enough money, diamonds, or gems. You may feel limited in what you can do or buy in the game. You may also feel left out or inferior compared to other players who have more resources than you. That is why you might want to use the Play Together hack to get unlimited money, diamonds, and gems in your account. These are some of the benefits of using the hack:</p>
- <h3>To enjoy the game without spending real money</h3><p>With the Play Together hack, you do not have to spend real money to get more money, diamonds, or gems in the game. You can get as many as you want for free and use them to buy whatever you want in the game. You can also save your money for other things that matter more to you in real life.</p>
- <h3>To unlock all the items, outfits, pets, and furniture</h3>
- <p>With the Play Together hack, you can unlock all the items, outfits, pets, and furniture that are available in the game. You do not have to wait to reach a certain level or event. You can also mix and match them to create your own unique style and look. You can also impress your friends and other players with your collection and show off your creativity.</p>
- <h3>To catch rare fish and insects for your collection</h3>
- <p>With the Play Together hack, you can catch rare fish and insects that are hard to find in the game. You do not have to spend hours fishing or hunting for them. You can also use them to decorate your house or sell them for more money. You can also complete your collection and earn achievements and rewards.</p>
- <h2>How do you download the Play Together hack?</h2>
- <p>Now that you know why you might want the Play Together hack, you may be wondering how to download and use it. Well, there are two ways to do it: the illegal way and the legal way. Let us see what they are, and what the risks and benefits of each one are.</p>
- <p>The illegal way to download the hack is to use programs, apps, or methods that are not authorized by the game developers or the app stores. These include things like:</p>
- <ul>
- <li>Hacking tools that inject code into the game or modify its files</li>
- <li>Modified apps that have been tampered with or altered from the original version</li>
- <li>Unauthorized methods that exploit glitches or bugs in the game</li>
- </ul>
- <p>While these methods may seem tempting, they also carry many risks. Some of them are:</p>
- <ul>
- <li>Getting viruses, malware, or spyware on your device that can damage your data or privacy</li>
- <li>Being scammed by fake websites or apps that ask for your personal information or payment details</li>
- <li>Being banned from the game or losing your account for violating the terms of service</li>
- </ul>
- <p>Therefore, we do not recommend using these methods, as they are not safe, reliable, or ethical.</p>
- <h3>The safe and easy way: using a Play Together hack generator</h3>
- <p>The legal way to download the Play Together hack is to use a generator that is approved by the game developers and the app stores. This is a website that lets you generate unlimited money, diamonds, and gems for your account in a few minutes. It is safe, easy, and free. These are some of the benefits of using this method:</p>
- <ul>
- <li>No viruses, malware, or spyware on your device, since you do not have to download anything</li>
- <li>No scams or fraud, since you do not have to provide any personal information or payment details</li>
- <li>No bans or account losses, since you do not violate any terms of service</li>
- </ul>
- <p>Therefore, we recommend using this method, as it is the best way to get unlimited resources in Play Together.</p>
- <h3>The steps to follow to get unlimited resources in your account</h3>
- <p>To use the Play Together hack generator, you only have to follow these simple steps:</p>
- <ol>
- <li>Enter the username or email address that you use to play Play Together</li>
- <li>Select your device type (Android or iOS) and click Connect</li>
- <li>Select the amount of money, diamonds, and gems you want to generate and click Generate</li>
- <li>Wait a few seconds while the generator processes your request</li>
- <li>Complete a quick human verification step to prove you are not a robot</li>
- <li>Check your account and enjoy your unlimited resources!</li>
- </ol>
- <h2>Conclusion</h2>
- <p>In conclusion, Play Together is a fun and social game where you can meet friends from all over the world, play mini-games, customize your character and house, and raise pets. However, if you want to enjoy the game without spending real money, you may want to download the Play Together hack and get unlimited money, diamonds, and gems in your account. The best way to do that is to use a Play Together hack generator that is safe, easy, and free to use. So what are you waiting for? Try the hack generator today and see for yourself how fun Play Together is with unlimited resources. You will be amazed at how much you can do and buy in the game. You will also be able to make more friends and have more fun with them. Just remember to use the generator responsibly and not abuse it. Also, be respectful to other players and do not ruin their gaming experience. After all, Play Together is a game that is meant to bring people together and have a good time.</p>
- <p>Before you go, here are some frequently asked questions you might have about the Play Together hack:</p>
- <h2>Frequently asked questions</h2>
- <h3>Is the Play Together hack safe?</h3>
-
- <h3>How long does it take to get the resources in my account?</h3>
- <p>It usually takes only a few minutes to get the resources in your account after using the hack generator. However, it can sometimes take longer depending on the server load or the verification process. If you do not see the resources in your account after 15 minutes, you can try again or contact the generator's support team for help.</p>
- <h3>Can I use the Play Together hack on any device?</h3>
- <p>Yes, you can use the Play Together hack on any device that can run Play Together. This includes Android and iOS devices as well as PC and Mac computers. You only need a stable internet connection and a web browser to access the hack generator website. You do not need to root or jailbreak your device or install any software or app.</p>
- <h3>Will I get banned for using the Play Together hack?</h3>
- <p>No, you will not get banned for using the Play Together hack, as long as you use it moderately and wisely. The hack generator uses advanced encryption and proxy servers to protect your account from detection and bans. However, if you use it too often or excessively, you might raise suspicion from the game developers or other players. So be smart and do not overdo it.</p>
- <h3>Where can I find more information about the Play Together hack?</h3>
- <p>If you want to find more information about the Play Together hack, you can visit the generator's official website or follow its social media pages. You can also read reviews and testimonials from other users who have used the generator and see how they liked it. You can also contact the generator's support team if you have any questions or problems with the service.</p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/requirements.py DELETED
@@ -1,146 +0,0 @@
1
- # This file is dual licensed under the terms of the Apache License, Version
2
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
- # for complete details.
4
-
5
- import re
6
- import string
7
- import urllib.parse
8
- from typing import List, Optional as TOptional, Set
9
-
10
- from pip._vendor.pyparsing import ( # noqa
11
- Combine,
12
- Literal as L,
13
- Optional,
14
- ParseException,
15
- Regex,
16
- Word,
17
- ZeroOrMore,
18
- originalTextFor,
19
- stringEnd,
20
- stringStart,
21
- )
22
-
23
- from .markers import MARKER_EXPR, Marker
24
- from .specifiers import LegacySpecifier, Specifier, SpecifierSet
25
-
26
-
27
- class InvalidRequirement(ValueError):
28
- """
29
- An invalid requirement was found, users should refer to PEP 508.
30
- """
31
-
32
-
33
- ALPHANUM = Word(string.ascii_letters + string.digits)
-
- LBRACKET = L("[").suppress()
- RBRACKET = L("]").suppress()
- LPAREN = L("(").suppress()
- RPAREN = L(")").suppress()
- COMMA = L(",").suppress()
- SEMICOLON = L(";").suppress()
- AT = L("@").suppress()
-
- PUNCTUATION = Word("-_.")
- IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
- IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
-
- NAME = IDENTIFIER("name")
- EXTRA = IDENTIFIER
-
- URI = Regex(r"[^ ]+")("url")
- URL = AT + URI
-
- EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
- EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
-
- VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
- VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
-
- VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
- VERSION_MANY = Combine(
-     VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
- )("_raw_spec")
- _VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
- _VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
-
- VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
- VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
-
- MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
- MARKER_EXPR.setParseAction(
-     lambda s, l, t: Marker(s[t._original_start : t._original_end])
- )
- MARKER_SEPARATOR = SEMICOLON
- MARKER = MARKER_SEPARATOR + MARKER_EXPR
-
- VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
- URL_AND_MARKER = URL + Optional(MARKER)
-
- NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
-
- REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
- # pyparsing isn't thread safe during initialization, so we do it eagerly, see
- # issue #104
- REQUIREMENT.parseString("x[]")
-
-
- class Requirement:
-     """Parse a requirement.
-
-     Parse a given requirement string into its parts, such as name, specifier,
-     URL, and extras. Raises InvalidRequirement on a badly-formed requirement
-     string.
-     """
-
-     # TODO: Can we test whether something is contained within a requirement?
-     #       If so how do we do that? Do we need to test against the _name_ of
-     #       the thing as well as the version? What about the markers?
-     # TODO: Can we normalize the name and extra name?
-
-     def __init__(self, requirement_string: str) -> None:
-         try:
-             req = REQUIREMENT.parseString(requirement_string)
-         except ParseException as e:
-             raise InvalidRequirement(
-                 f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
-             )
-
-         self.name: str = req.name
-         if req.url:
-             parsed_url = urllib.parse.urlparse(req.url)
-             if parsed_url.scheme == "file":
-                 if urllib.parse.urlunparse(parsed_url) != req.url:
-                     raise InvalidRequirement("Invalid URL given")
-             elif not (parsed_url.scheme and parsed_url.netloc) or (
-                 not parsed_url.scheme and not parsed_url.netloc
-             ):
-                 raise InvalidRequirement(f"Invalid URL: {req.url}")
-             self.url: TOptional[str] = req.url
-         else:
-             self.url = None
-         self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
-         self.specifier: SpecifierSet = SpecifierSet(req.specifier)
-         self.marker: TOptional[Marker] = req.marker if req.marker else None
-
-     def __str__(self) -> str:
-         parts: List[str] = [self.name]
-
-         if self.extras:
-             formatted_extras = ",".join(sorted(self.extras))
-             parts.append(f"[{formatted_extras}]")
-
-         if self.specifier:
-             parts.append(str(self.specifier))
-
-         if self.url:
-             parts.append(f"@ {self.url}")
-             if self.marker:
-                 parts.append(" ")
-
-         if self.marker:
-             parts.append(f"; {self.marker}")
-
-         return "".join(parts)
-
-     def __repr__(self) -> str:
-         return f"<Requirement('{self}')>"
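
A minimal sketch of how the Requirement class above is typically exercised, assuming this is pip's vendored copy of packaging.requirements (the requirement string and the printed values are illustrative):

from pip._vendor.packaging.requirements import Requirement

# Parse a PEP 508 requirement string into name, extras, specifier, and marker.
req = Requirement('requests[security]>=2.8.1; python_version < "3.11"')
print(req.name)       # requests
print(req.extras)     # {'security'}
print(req.specifier)  # >=2.8.1
print(req.marker)     # python_version < "3.11"
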
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/rule.py DELETED
@@ -1,130 +0,0 @@
- from typing import Union
-
- from .align import AlignMethod
- from .cells import cell_len, set_cell_size
- from .console import Console, ConsoleOptions, RenderResult
- from .jupyter import JupyterMixin
- from .measure import Measurement
- from .style import Style
- from .text import Text
-
-
- class Rule(JupyterMixin):
-     """A console renderable to draw a horizontal rule (line).
-
-     Args:
-         title (Union[str, Text], optional): Text to render in the rule. Defaults to "".
-         characters (str, optional): Character(s) used to draw the line. Defaults to "─".
-         style (StyleType, optional): Style of Rule. Defaults to "rule.line".
-         end (str, optional): Character at end of Rule. defaults to "\\\\n"
-         align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
-     """
-
-     def __init__(
-         self,
-         title: Union[str, Text] = "",
-         *,
-         characters: str = "─",
-         style: Union[str, Style] = "rule.line",
-         end: str = "\n",
-         align: AlignMethod = "center",
-     ) -> None:
-         if cell_len(characters) < 1:
-             raise ValueError(
-                 "'characters' argument must have a cell width of at least 1"
-             )
-         if align not in ("left", "center", "right"):
-             raise ValueError(
-                 f'invalid value for align, expected "left", "center", "right" (not {align!r})'
-             )
-         self.title = title
-         self.characters = characters
-         self.style = style
-         self.end = end
-         self.align = align
-
-     def __repr__(self) -> str:
-         return f"Rule({self.title!r}, {self.characters!r})"
-
-     def __rich_console__(
-         self, console: Console, options: ConsoleOptions
-     ) -> RenderResult:
-         width = options.max_width
-
-         characters = (
-             "-"
-             if (options.ascii_only and not self.characters.isascii())
-             else self.characters
-         )
-
-         chars_len = cell_len(characters)
-         if not self.title:
-             yield self._rule_line(chars_len, width)
-             return
-
-         if isinstance(self.title, Text):
-             title_text = self.title
-         else:
-             title_text = console.render_str(self.title, style="rule.text")
-
-         title_text.plain = title_text.plain.replace("\n", " ")
-         title_text.expand_tabs()
-
-         required_space = 4 if self.align == "center" else 2
-         truncate_width = max(0, width - required_space)
-         if not truncate_width:
-             yield self._rule_line(chars_len, width)
-             return
-
-         rule_text = Text(end=self.end)
-         if self.align == "center":
-             title_text.truncate(truncate_width, overflow="ellipsis")
-             side_width = (width - cell_len(title_text.plain)) // 2
-             left = Text(characters * (side_width // chars_len + 1))
-             left.truncate(side_width - 1)
-             right_length = width - cell_len(left.plain) - cell_len(title_text.plain)
-             right = Text(characters * (side_width // chars_len + 1))
-             right.truncate(right_length)
-             rule_text.append(left.plain + " ", self.style)
-             rule_text.append(title_text)
-             rule_text.append(" " + right.plain, self.style)
-         elif self.align == "left":
-             title_text.truncate(truncate_width, overflow="ellipsis")
-             rule_text.append(title_text)
-             rule_text.append(" ")
-             rule_text.append(characters * (width - rule_text.cell_len), self.style)
-         elif self.align == "right":
-             title_text.truncate(truncate_width, overflow="ellipsis")
-             rule_text.append(characters * (width - title_text.cell_len - 1), self.style)
-             rule_text.append(" ")
-             rule_text.append(title_text)
-
-         rule_text.plain = set_cell_size(rule_text.plain, width)
-         yield rule_text
-
-     def _rule_line(self, chars_len: int, width: int) -> Text:
-         rule_text = Text(self.characters * ((width // chars_len) + 1), self.style)
-         rule_text.truncate(width)
-         rule_text.plain = set_cell_size(rule_text.plain, width)
-         return rule_text
-
-     def __rich_measure__(
-         self, console: Console, options: ConsoleOptions
-     ) -> Measurement:
-         return Measurement(1, 1)
-
-
- if __name__ == "__main__":  # pragma: no cover
-     import sys
-
-     from pip._vendor.rich.console import Console
-
-     try:
-         text = sys.argv[1]
-     except IndexError:
-         text = "Hello, World"
-     console = Console()
-     console.print(Rule(title=text))
-
-     console = Console()
-     console.print(Rule("foo"), width=4)
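
A minimal usage sketch of the Rule renderable above, assuming the vendored import paths used elsewhere in this file:

from pip._vendor.rich.console import Console
from pip._vendor.rich.rule import Rule

console = Console()
console.print(Rule())                         # bare horizontal line across the terminal
console.print(Rule("Section", align="left"))  # titled rule, title flush left
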
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/traceback.py DELETED
@@ -1,756 +0,0 @@
- from __future__ import absolute_import
-
- import linecache
- import os
- import platform
- import sys
- from dataclasses import dataclass, field
- from traceback import walk_tb
- from types import ModuleType, TracebackType
- from typing import (
-     Any,
-     Callable,
-     Dict,
-     Iterable,
-     List,
-     Optional,
-     Sequence,
-     Tuple,
-     Type,
-     Union,
- )
-
- from pip._vendor.pygments.lexers import guess_lexer_for_filename
- from pip._vendor.pygments.token import Comment, Keyword, Name, Number, Operator, String
- from pip._vendor.pygments.token import Text as TextToken
- from pip._vendor.pygments.token import Token
- from pip._vendor.pygments.util import ClassNotFound
-
- from . import pretty
- from ._loop import loop_last
- from .columns import Columns
- from .console import Console, ConsoleOptions, ConsoleRenderable, RenderResult, group
- from .constrain import Constrain
- from .highlighter import RegexHighlighter, ReprHighlighter
- from .panel import Panel
- from .scope import render_scope
- from .style import Style
- from .syntax import Syntax
- from .text import Text
- from .theme import Theme
-
- WINDOWS = platform.system() == "Windows"
-
- LOCALS_MAX_LENGTH = 10
- LOCALS_MAX_STRING = 80
-
-
- def install(
-     *,
-     console: Optional[Console] = None,
-     width: Optional[int] = 100,
-     extra_lines: int = 3,
-     theme: Optional[str] = None,
-     word_wrap: bool = False,
-     show_locals: bool = False,
-     locals_max_length: int = LOCALS_MAX_LENGTH,
-     locals_max_string: int = LOCALS_MAX_STRING,
-     locals_hide_dunder: bool = True,
-     locals_hide_sunder: Optional[bool] = None,
-     indent_guides: bool = True,
-     suppress: Iterable[Union[str, ModuleType]] = (),
-     max_frames: int = 100,
- ) -> Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any]:
-     """Install a rich traceback handler.
-
-     Once installed, any tracebacks will be printed with syntax highlighting and rich formatting.
-
-
-     Args:
-         console (Optional[Console], optional): Console to write exception to. Default uses internal Console instance.
-         width (Optional[int], optional): Width (in characters) of traceback. Defaults to 100.
-         extra_lines (int, optional): Extra lines of code. Defaults to 3.
-         theme (Optional[str], optional): Pygments theme to use in traceback. Defaults to ``None`` which will pick
-             a theme appropriate for the platform.
-         word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
-         show_locals (bool, optional): Enable display of local variables. Defaults to False.
-         locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
-             Defaults to 10.
-         locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
-         locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True.
-         locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False.
-         indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
-         suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
-
-     Returns:
-         Callable: The previous exception handler that was replaced.
-
-     """
-     traceback_console = Console(stderr=True) if console is None else console
-
-     locals_hide_sunder = (
-         True
-         if (traceback_console.is_jupyter and locals_hide_sunder is None)
-         else locals_hide_sunder
-     )
-
-     def excepthook(
-         type_: Type[BaseException],
-         value: BaseException,
-         traceback: Optional[TracebackType],
-     ) -> None:
-         traceback_console.print(
-             Traceback.from_exception(
-                 type_,
-                 value,
-                 traceback,
-                 width=width,
-                 extra_lines=extra_lines,
-                 theme=theme,
-                 word_wrap=word_wrap,
-                 show_locals=show_locals,
-                 locals_max_length=locals_max_length,
-                 locals_max_string=locals_max_string,
-                 locals_hide_dunder=locals_hide_dunder,
-                 locals_hide_sunder=bool(locals_hide_sunder),
-                 indent_guides=indent_guides,
-                 suppress=suppress,
-                 max_frames=max_frames,
-             )
-         )
-
-     def ipy_excepthook_closure(ip: Any) -> None:  # pragma: no cover
-         tb_data = {}  # store information about showtraceback call
-         default_showtraceback = ip.showtraceback  # keep reference of default traceback
-
-         def ipy_show_traceback(*args: Any, **kwargs: Any) -> None:
-             """wrap the default ip.showtraceback to store info for ip._showtraceback"""
-             nonlocal tb_data
-             tb_data = kwargs
-             default_showtraceback(*args, **kwargs)
-
-         def ipy_display_traceback(
-             *args: Any, is_syntax: bool = False, **kwargs: Any
-         ) -> None:
-             """Internally called traceback from ip._showtraceback"""
-             nonlocal tb_data
-             exc_tuple = ip._get_exc_info()
-
-             # do not display trace on syntax error
-             tb: Optional[TracebackType] = None if is_syntax else exc_tuple[2]
-
-             # determine correct tb_offset
-             compiled = tb_data.get("running_compiled_code", False)
-             tb_offset = tb_data.get("tb_offset", 1 if compiled else 0)
-             # remove ipython internal frames from trace with tb_offset
-             for _ in range(tb_offset):
-                 if tb is None:
-                     break
-                 tb = tb.tb_next
-
-             excepthook(exc_tuple[0], exc_tuple[1], tb)
-             tb_data = {}  # clear data upon usage
-
-         # replace _showtraceback instead of showtraceback to allow ipython features such as debugging to work
-         # this is also what the ipython docs recommends to modify when subclassing InteractiveShell
-         ip._showtraceback = ipy_display_traceback
-         # add wrapper to capture tb_data
-         ip.showtraceback = ipy_show_traceback
-         ip.showsyntaxerror = lambda *args, **kwargs: ipy_display_traceback(
-             *args, is_syntax=True, **kwargs
-         )
-
-     try:  # pragma: no cover
-         # if within ipython, use customized traceback
-         ip = get_ipython()  # type: ignore[name-defined]
-         ipy_excepthook_closure(ip)
-         return sys.excepthook
-     except Exception:
-         # otherwise use default system hook
-         old_excepthook = sys.excepthook
-         sys.excepthook = excepthook
-         return old_excepthook
-
-
- @dataclass
- class Frame:
-     filename: str
-     lineno: int
-     name: str
-     line: str = ""
-     locals: Optional[Dict[str, pretty.Node]] = None
-
-
- @dataclass
- class _SyntaxError:
-     offset: int
-     filename: str
-     line: str
-     lineno: int
-     msg: str
-
-
- @dataclass
- class Stack:
-     exc_type: str
-     exc_value: str
-     syntax_error: Optional[_SyntaxError] = None
-     is_cause: bool = False
-     frames: List[Frame] = field(default_factory=list)
-
-
- @dataclass
- class Trace:
-     stacks: List[Stack]
-
-
- class PathHighlighter(RegexHighlighter):
-     highlights = [r"(?P<dim>.*/)(?P<bold>.+)"]
-
-
- class Traceback:
-     """A Console renderable that renders a traceback.
-
-     Args:
-         trace (Trace, optional): A `Trace` object produced from `extract`. Defaults to None, which uses
-             the last exception.
-         width (Optional[int], optional): Number of characters used to traceback. Defaults to 100.
-         extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
-         theme (str, optional): Override pygments theme used in traceback.
-         word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
-         show_locals (bool, optional): Enable display of local variables. Defaults to False.
-         indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
-         locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
-             Defaults to 10.
-         locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
-         locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True.
-         locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False.
-         suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
-         max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
-
-     """
-
-     LEXERS = {
-         "": "text",
-         ".py": "python",
-         ".pxd": "cython",
-         ".pyx": "cython",
-         ".pxi": "pyrex",
-     }
-
-     def __init__(
-         self,
-         trace: Optional[Trace] = None,
-         *,
-         width: Optional[int] = 100,
-         extra_lines: int = 3,
-         theme: Optional[str] = None,
-         word_wrap: bool = False,
-         show_locals: bool = False,
-         locals_max_length: int = LOCALS_MAX_LENGTH,
-         locals_max_string: int = LOCALS_MAX_STRING,
-         locals_hide_dunder: bool = True,
-         locals_hide_sunder: bool = False,
-         indent_guides: bool = True,
-         suppress: Iterable[Union[str, ModuleType]] = (),
-         max_frames: int = 100,
-     ):
-         if trace is None:
-             exc_type, exc_value, traceback = sys.exc_info()
-             if exc_type is None or exc_value is None or traceback is None:
-                 raise ValueError(
-                     "Value for 'trace' required if not called in except: block"
-                 )
-             trace = self.extract(
-                 exc_type, exc_value, traceback, show_locals=show_locals
-             )
-         self.trace = trace
-         self.width = width
-         self.extra_lines = extra_lines
-         self.theme = Syntax.get_theme(theme or "ansi_dark")
-         self.word_wrap = word_wrap
-         self.show_locals = show_locals
-         self.indent_guides = indent_guides
-         self.locals_max_length = locals_max_length
-         self.locals_max_string = locals_max_string
-         self.locals_hide_dunder = locals_hide_dunder
-         self.locals_hide_sunder = locals_hide_sunder
-
-         self.suppress: Sequence[str] = []
-         for suppress_entity in suppress:
-             if not isinstance(suppress_entity, str):
-                 assert (
-                     suppress_entity.__file__ is not None
-                 ), f"{suppress_entity!r} must be a module with '__file__' attribute"
-                 path = os.path.dirname(suppress_entity.__file__)
-             else:
-                 path = suppress_entity
-             path = os.path.normpath(os.path.abspath(path))
-             self.suppress.append(path)
-         self.max_frames = max(4, max_frames) if max_frames > 0 else 0
-
-     @classmethod
-     def from_exception(
-         cls,
-         exc_type: Type[Any],
-         exc_value: BaseException,
-         traceback: Optional[TracebackType],
-         *,
-         width: Optional[int] = 100,
-         extra_lines: int = 3,
-         theme: Optional[str] = None,
-         word_wrap: bool = False,
-         show_locals: bool = False,
-         locals_max_length: int = LOCALS_MAX_LENGTH,
-         locals_max_string: int = LOCALS_MAX_STRING,
-         locals_hide_dunder: bool = True,
-         locals_hide_sunder: bool = False,
-         indent_guides: bool = True,
-         suppress: Iterable[Union[str, ModuleType]] = (),
-         max_frames: int = 100,
-     ) -> "Traceback":
-         """Create a traceback from exception info
-
-         Args:
-             exc_type (Type[BaseException]): Exception type.
-             exc_value (BaseException): Exception value.
-             traceback (TracebackType): Python Traceback object.
-             width (Optional[int], optional): Number of characters used to traceback. Defaults to 100.
-             extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
-             theme (str, optional): Override pygments theme used in traceback.
-             word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
-             show_locals (bool, optional): Enable display of local variables. Defaults to False.
-             indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
-             locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
-                 Defaults to 10.
-             locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
-             locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True.
-             locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False.
-             suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
-             max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
-
-         Returns:
-             Traceback: A Traceback instance that may be printed.
-         """
-         rich_traceback = cls.extract(
-             exc_type,
-             exc_value,
-             traceback,
-             show_locals=show_locals,
-             locals_max_length=locals_max_length,
-             locals_max_string=locals_max_string,
-             locals_hide_dunder=locals_hide_dunder,
-             locals_hide_sunder=locals_hide_sunder,
-         )
-
-         return cls(
-             rich_traceback,
-             width=width,
-             extra_lines=extra_lines,
-             theme=theme,
-             word_wrap=word_wrap,
-             show_locals=show_locals,
-             indent_guides=indent_guides,
-             locals_max_length=locals_max_length,
-             locals_max_string=locals_max_string,
-             locals_hide_dunder=locals_hide_dunder,
-             locals_hide_sunder=locals_hide_sunder,
-             suppress=suppress,
-             max_frames=max_frames,
-         )
-
-     @classmethod
-     def extract(
-         cls,
-         exc_type: Type[BaseException],
-         exc_value: BaseException,
-         traceback: Optional[TracebackType],
-         *,
-         show_locals: bool = False,
-         locals_max_length: int = LOCALS_MAX_LENGTH,
-         locals_max_string: int = LOCALS_MAX_STRING,
-         locals_hide_dunder: bool = True,
-         locals_hide_sunder: bool = False,
-     ) -> Trace:
-         """Extract traceback information.
-
-         Args:
-             exc_type (Type[BaseException]): Exception type.
-             exc_value (BaseException): Exception value.
-             traceback (TracebackType): Python Traceback object.
-             show_locals (bool, optional): Enable display of local variables. Defaults to False.
-             locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
-                 Defaults to 10.
-             locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
-             locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True.
-             locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False.
-
-         Returns:
-             Trace: A Trace instance which you can use to construct a `Traceback`.
-         """
-
-         stacks: List[Stack] = []
-         is_cause = False
-
-         from pip._vendor.rich import _IMPORT_CWD
-
-         def safe_str(_object: Any) -> str:
-             """Don't allow exceptions from __str__ to propagate."""
-             try:
-                 return str(_object)
-             except Exception:
-                 return "<exception str() failed>"
-
-         while True:
-             stack = Stack(
-                 exc_type=safe_str(exc_type.__name__),
-                 exc_value=safe_str(exc_value),
-                 is_cause=is_cause,
-             )
-
-             if isinstance(exc_value, SyntaxError):
-                 stack.syntax_error = _SyntaxError(
-                     offset=exc_value.offset or 0,
-                     filename=exc_value.filename or "?",
-                     lineno=exc_value.lineno or 0,
-                     line=exc_value.text or "",
-                     msg=exc_value.msg,
-                 )
-
-             stacks.append(stack)
-             append = stack.frames.append
-
-             def get_locals(
-                 iter_locals: Iterable[Tuple[str, object]]
-             ) -> Iterable[Tuple[str, object]]:
-                 """Extract locals from an iterator of key pairs."""
-                 if not (locals_hide_dunder or locals_hide_sunder):
-                     yield from iter_locals
-                     return
-                 for key, value in iter_locals:
-                     if locals_hide_dunder and key.startswith("__"):
-                         continue
-                     if locals_hide_sunder and key.startswith("_"):
-                         continue
-                     yield key, value
-
-             for frame_summary, line_no in walk_tb(traceback):
-                 filename = frame_summary.f_code.co_filename
-                 if filename and not filename.startswith("<"):
-                     if not os.path.isabs(filename):
-                         filename = os.path.join(_IMPORT_CWD, filename)
-                 if frame_summary.f_locals.get("_rich_traceback_omit", False):
-                     continue
-
-                 frame = Frame(
-                     filename=filename or "?",
-                     lineno=line_no,
-                     name=frame_summary.f_code.co_name,
-                     locals={
-                         key: pretty.traverse(
-                             value,
-                             max_length=locals_max_length,
-                             max_string=locals_max_string,
-                         )
-                         for key, value in get_locals(frame_summary.f_locals.items())
-                     }
-                     if show_locals
-                     else None,
-                 )
-                 append(frame)
-                 if frame_summary.f_locals.get("_rich_traceback_guard", False):
-                     del stack.frames[:]
-
-             cause = getattr(exc_value, "__cause__", None)
-             if cause:
-                 exc_type = cause.__class__
-                 exc_value = cause
-                 # __traceback__ can be None, e.g. for exceptions raised by the
-                 # 'multiprocessing' module
-                 traceback = cause.__traceback__
-                 is_cause = True
-                 continue
-
-             cause = exc_value.__context__
-             if cause and not getattr(exc_value, "__suppress_context__", False):
-                 exc_type = cause.__class__
-                 exc_value = cause
-                 traceback = cause.__traceback__
-                 is_cause = False
-                 continue
-             # No cover, code is reached but coverage doesn't recognize it.
-             break  # pragma: no cover
-
-         trace = Trace(stacks=stacks)
-         return trace
-
-     def __rich_console__(
-         self, console: Console, options: ConsoleOptions
-     ) -> RenderResult:
-         theme = self.theme
-         background_style = theme.get_background_style()
-         token_style = theme.get_style_for_token
-
-         traceback_theme = Theme(
-             {
-                 "pretty": token_style(TextToken),
-                 "pygments.text": token_style(Token),
-                 "pygments.string": token_style(String),
-                 "pygments.function": token_style(Name.Function),
-                 "pygments.number": token_style(Number),
-                 "repr.indent": token_style(Comment) + Style(dim=True),
-                 "repr.str": token_style(String),
-                 "repr.brace": token_style(TextToken) + Style(bold=True),
-                 "repr.number": token_style(Number),
-                 "repr.bool_true": token_style(Keyword.Constant),
-                 "repr.bool_false": token_style(Keyword.Constant),
-                 "repr.none": token_style(Keyword.Constant),
-                 "scope.border": token_style(String.Delimiter),
-                 "scope.equals": token_style(Operator),
-                 "scope.key": token_style(Name),
-                 "scope.key.special": token_style(Name.Constant) + Style(dim=True),
-             },
-             inherit=False,
-         )
-
-         highlighter = ReprHighlighter()
-         for last, stack in loop_last(reversed(self.trace.stacks)):
-             if stack.frames:
-                 stack_renderable: ConsoleRenderable = Panel(
-                     self._render_stack(stack),
-                     title="[traceback.title]Traceback [dim](most recent call last)",
-                     style=background_style,
-                     border_style="traceback.border",
-                     expand=True,
-                     padding=(0, 1),
-                 )
-                 stack_renderable = Constrain(stack_renderable, self.width)
-                 with console.use_theme(traceback_theme):
-                     yield stack_renderable
-             if stack.syntax_error is not None:
-                 with console.use_theme(traceback_theme):
-                     yield Constrain(
-                         Panel(
-                             self._render_syntax_error(stack.syntax_error),
-                             style=background_style,
-                             border_style="traceback.border.syntax_error",
-                             expand=True,
-                             padding=(0, 1),
-                             width=self.width,
-                         ),
-                         self.width,
-                     )
-                 yield Text.assemble(
-                     (f"{stack.exc_type}: ", "traceback.exc_type"),
-                     highlighter(stack.syntax_error.msg),
-                 )
-             elif stack.exc_value:
-                 yield Text.assemble(
-                     (f"{stack.exc_type}: ", "traceback.exc_type"),
-                     highlighter(stack.exc_value),
-                 )
-             else:
-                 yield Text.assemble((f"{stack.exc_type}", "traceback.exc_type"))
-
-             if not last:
-                 if stack.is_cause:
-                     yield Text.from_markup(
-                         "\n[i]The above exception was the direct cause of the following exception:\n",
-                     )
-                 else:
-                     yield Text.from_markup(
-                         "\n[i]During handling of the above exception, another exception occurred:\n",
-                     )
-
-     @group()
-     def _render_syntax_error(self, syntax_error: _SyntaxError) -> RenderResult:
-         highlighter = ReprHighlighter()
-         path_highlighter = PathHighlighter()
-         if syntax_error.filename != "<stdin>":
-             if os.path.exists(syntax_error.filename):
-                 text = Text.assemble(
-                     (f" {syntax_error.filename}", "pygments.string"),
-                     (":", "pygments.text"),
-                     (str(syntax_error.lineno), "pygments.number"),
-                     style="pygments.text",
-                 )
-                 yield path_highlighter(text)
-         syntax_error_text = highlighter(syntax_error.line.rstrip())
-         syntax_error_text.no_wrap = True
-         offset = min(syntax_error.offset - 1, len(syntax_error_text))
-         syntax_error_text.stylize("bold underline", offset, offset)
-         syntax_error_text += Text.from_markup(
-             "\n" + " " * offset + "[traceback.offset]▲[/]",
-             style="pygments.text",
-         )
-         yield syntax_error_text
-
-     @classmethod
-     def _guess_lexer(cls, filename: str, code: str) -> str:
-         ext = os.path.splitext(filename)[-1]
-         if not ext:
-             # No extension, look at first line to see if it is a hashbang
-             # Note, this is an educated guess and not a guarantee
-             # If it fails, the only downside is that the code is highlighted strangely
-             new_line_index = code.index("\n")
-             first_line = code[:new_line_index] if new_line_index != -1 else code
-             if first_line.startswith("#!") and "python" in first_line.lower():
-                 return "python"
-         try:
-             return cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name
-         except ClassNotFound:
-             return "text"
-
-     @group()
-     def _render_stack(self, stack: Stack) -> RenderResult:
-         path_highlighter = PathHighlighter()
-         theme = self.theme
-
-         def read_code(filename: str) -> str:
-             """Read files, and cache results on filename.
-
-             Args:
-                 filename (str): Filename to read
-
-             Returns:
-                 str: Contents of file
-             """
-             return "".join(linecache.getlines(filename))
-
-         def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]:
-             if frame.locals:
-                 yield render_scope(
-                     frame.locals,
-                     title="locals",
-                     indent_guides=self.indent_guides,
-                     max_length=self.locals_max_length,
-                     max_string=self.locals_max_string,
-                 )
-
-         exclude_frames: Optional[range] = None
-         if self.max_frames != 0:
-             exclude_frames = range(
-                 self.max_frames // 2,
-                 len(stack.frames) - self.max_frames // 2,
-             )
-
-         excluded = False
-         for frame_index, frame in enumerate(stack.frames):
-
-             if exclude_frames and frame_index in exclude_frames:
-                 excluded = True
-                 continue
-
-             if excluded:
-                 assert exclude_frames is not None
-                 yield Text(
-                     f"\n... {len(exclude_frames)} frames hidden ...",
-                     justify="center",
-                     style="traceback.error",
-                 )
-                 excluded = False
-
-             first = frame_index == 0
-             frame_filename = frame.filename
-             suppressed = any(frame_filename.startswith(path) for path in self.suppress)
-
-             if os.path.exists(frame.filename):
-                 text = Text.assemble(
-                     path_highlighter(Text(frame.filename, style="pygments.string")),
-                     (":", "pygments.text"),
-                     (str(frame.lineno), "pygments.number"),
-                     " in ",
-                     (frame.name, "pygments.function"),
-                     style="pygments.text",
-                 )
-             else:
-                 text = Text.assemble(
-                     "in ",
-                     (frame.name, "pygments.function"),
-                     (":", "pygments.text"),
-                     (str(frame.lineno), "pygments.number"),
-                     style="pygments.text",
-                 )
-             if not frame.filename.startswith("<") and not first:
-                 yield ""
-             yield text
-             if frame.filename.startswith("<"):
-                 yield from render_locals(frame)
-                 continue
-             if not suppressed:
-                 try:
-                     code = read_code(frame.filename)
-                     if not code:
-                         # code may be an empty string if the file doesn't exist, OR
-                         # if the traceback filename is generated dynamically
-                         continue
-                     lexer_name = self._guess_lexer(frame.filename, code)
-                     syntax = Syntax(
-                         code,
-                         lexer_name,
-                         theme=theme,
-                         line_numbers=True,
-                         line_range=(
-                             frame.lineno - self.extra_lines,
-                             frame.lineno + self.extra_lines,
-                         ),
-                         highlight_lines={frame.lineno},
-                         word_wrap=self.word_wrap,
-                         code_width=88,
-                         indent_guides=self.indent_guides,
-                         dedent=False,
-                     )
-                     yield ""
-                 except Exception as error:
-                     yield Text.assemble(
-                         (f"\n{error}", "traceback.error"),
-                     )
-                 else:
-                     yield (
-                         Columns(
-                             [
-                                 syntax,
-                                 *render_locals(frame),
-                             ],
-                             padding=1,
-                         )
-                         if frame.locals
-                         else syntax
-                     )
-
-
- if __name__ == "__main__":  # pragma: no cover
-
-     from .console import Console
-
-     console = Console()
-     import sys
-
-     def bar(a: Any) -> None:  # 这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑 (This is a test of Asian-language support. In the face of ambiguity, refuse the temptation to guess.)
-         one = 1
-         print(one / a)
-
-     def foo(a: Any) -> None:
-         _rich_traceback_guard = True
-         zed = {
-             "characters": {
-                 "Paul Atreides",
-                 "Vladimir Harkonnen",
-                 "Thufir Hawat",
-                 "Duncan Idaho",
-             },
-             "atomic_types": (None, False, True),
-         }
-         bar(a)
-
-     def error() -> None:
-
-         try:
-             try:
-                 foo(0)
-             except:
-                 slfkjsldkfj  # type: ignore[name-defined]
-         except:
-             console.print_exception(show_locals=True)
-
-     error()
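
A minimal sketch of the handler in use: install() replaces sys.excepthook and returns the hook it displaced, so any uncaught exception afterwards is rendered through Traceback.from_exception(). The divide() helper is a hypothetical example, not part of the module:

from pip._vendor.rich.traceback import install

previous_hook = install(show_locals=True)  # returns the replaced sys.excepthook

def divide(a: float, b: float) -> float:
    return a / b

divide(1, 0)  # the uncaught ZeroDivisionError now renders with code context and locals
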
spaces/BlitzEsports/TextToImage/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: DALL·E mini
- metaTitle: "DALL·E mini by craiyon.com on Hugging Face"
- emoji: 🥑
- colorFrom: yellow
- colorTo: green
- sdk: static
- pinned: True
- license: apache-2.0
- ---
spaces/BlitzKriegM/argilla/README.md DELETED
@@ -1,19 +0,0 @@
- ---
- title: Argilla Space Template
- emoji: 🏷️
- colorFrom: purple
- colorTo: red
- sdk: docker
- app_port: 6900
- fullWidth: true
- tags:
- - argilla
- duplicated_from: fka/awesome-chatgpt-prompts
- ---
-
- This is the Argilla Space Template you can use to deploy and run your own instance of Argilla on the Hugging Face Hub, for labeling, fun, and active learning loops!
-
- Login with:
-
- user: argilla
- password: 1234
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/evaluation/rotated_coco_evaluation.py DELETED
@@ -1,203 +0,0 @@
- import itertools
- import json
- import numpy as np
- import os
- import torch
- from fvcore.common.file_io import PathManager
- from pycocotools.cocoeval import COCOeval, maskUtils
-
- from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated
-
- from .coco_evaluation import COCOEvaluator
-
-
- class RotatedCOCOeval(COCOeval):
-     @staticmethod
-     def is_rotated(box_list):
-         if type(box_list) == np.ndarray:
-             return box_list.shape[1] == 5
-         elif type(box_list) == list:
-             if box_list == []:  # cannot decide the box_dim
-                 return False
-             return np.all(
-                 np.array(
-                     [
-                         (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray))
-                         for obj in box_list
-                     ]
-                 )
-             )
-         return False
-
-     @staticmethod
-     def boxlist_to_tensor(boxlist, output_box_dim):
-         if type(boxlist) == np.ndarray:
-             box_tensor = torch.from_numpy(boxlist)
-         elif type(boxlist) == list:
-             if boxlist == []:
-                 return torch.zeros((0, output_box_dim), dtype=torch.float32)
-             else:
-                 box_tensor = torch.FloatTensor(boxlist)
-         else:
-             raise Exception("Unrecognized boxlist type")
-
-         input_box_dim = box_tensor.shape[1]
-         if input_box_dim != output_box_dim:
-             if input_box_dim == 4 and output_box_dim == 5:
-                 box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
-             else:
-                 raise Exception(
-                     "Unable to convert from {}-dim box to {}-dim box".format(
-                         input_box_dim, output_box_dim
-                     )
-                 )
-         return box_tensor
-
-     def compute_iou_dt_gt(self, dt, gt, is_crowd):
-         if self.is_rotated(dt) or self.is_rotated(gt):
-             # TODO: take is_crowd into consideration
-             assert all(c == 0 for c in is_crowd)
-             dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5))
-             gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5))
-             return pairwise_iou_rotated(dt, gt)
-         else:
-             # This is the same as the classical COCO evaluation
-             return maskUtils.iou(dt, gt, is_crowd)
-
-     def computeIoU(self, imgId, catId):
-         p = self.params
-         if p.useCats:
-             gt = self._gts[imgId, catId]
-             dt = self._dts[imgId, catId]
-         else:
-             gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
-             dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
-         if len(gt) == 0 and len(dt) == 0:
-             return []
-         inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
-         dt = [dt[i] for i in inds]
-         if len(dt) > p.maxDets[-1]:
-             dt = dt[0 : p.maxDets[-1]]
-
-         assert p.iouType == "bbox", "unsupported iouType for iou computation"
-
-         g = [g["bbox"] for g in gt]
-         d = [d["bbox"] for d in dt]
-
-         # compute iou between each dt and gt region
-         iscrowd = [int(o["iscrowd"]) for o in gt]
-
-         # Note: this function is copied from cocoeval.py in cocoapi
-         # and the major difference is here.
-         ious = self.compute_iou_dt_gt(d, g, iscrowd)
-         return ious
-
-
- class RotatedCOCOEvaluator(COCOEvaluator):
-     """
-     Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs,
-     with rotated boxes support.
-     Note: this uses IOU only and does not consider angle differences.
-     """
-
-     def process(self, inputs, outputs):
-         """
-         Args:
-             inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
-                 It is a list of dict. Each dict corresponds to an image and
-                 contains keys like "height", "width", "file_name", "image_id".
-             outputs: the outputs of a COCO model. It is a list of dicts with key
-                 "instances" that contains :class:`Instances`.
-         """
-         for input, output in zip(inputs, outputs):
-             prediction = {"image_id": input["image_id"]}
-
-             if "instances" in output:
-                 instances = output["instances"].to(self._cpu_device)
-
-                 prediction["instances"] = self.instances_to_json(instances, input["image_id"])
-             if "proposals" in output:
-                 prediction["proposals"] = output["proposals"].to(self._cpu_device)
-             self._predictions.append(prediction)
-
-     def instances_to_json(self, instances, img_id):
-         num_instance = len(instances)
-         if num_instance == 0:
-             return []
-
-         boxes = instances.pred_boxes.tensor.numpy()
-         if boxes.shape[1] == 4:
-             boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
-         boxes = boxes.tolist()
-         scores = instances.scores.tolist()
-         classes = instances.pred_classes.tolist()
-
-         results = []
-         for k in range(num_instance):
-             result = {
-                 "image_id": img_id,
-                 "category_id": classes[k],
-                 "bbox": boxes[k],
-                 "score": scores[k],
-             }
-
-             results.append(result)
-         return results
-
-     def _eval_predictions(self, tasks, predictions):
-         """
-         Evaluate predictions on the given tasks.
-         Fill self._results with the metrics of the tasks.
-         """
-         self._logger.info("Preparing results for COCO format ...")
-         coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
-
-         # unmap the category ids for COCO
-         if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
-             reverse_id_mapping = {
-                 v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
-             }
-             for result in coco_results:
-                 result["category_id"] = reverse_id_mapping[result["category_id"]]
-
-         if self._output_dir:
-             file_path = os.path.join(self._output_dir, "coco_instances_results.json")
-             self._logger.info("Saving results to {}".format(file_path))
-             with PathManager.open(file_path, "w") as f:
-                 f.write(json.dumps(coco_results))
-                 f.flush()
-
-         if not self._do_evaluation:
-             self._logger.info("Annotations are not available for evaluation.")
-             return
-
-         self._logger.info("Evaluating predictions ...")
-         for task in sorted(tasks):
-             assert task == "bbox", "Task {} is not supported".format(task)
-             coco_eval = (
-                 self._evaluate_predictions_on_coco(self._coco_api, coco_results)
-                 if len(coco_results) > 0
-                 else None  # cocoapi does not handle empty results very well
-             )
-
-             res = self._derive_coco_results(
-                 coco_eval, task, class_names=self._metadata.get("thing_classes")
-             )
-             self._results[task] = res
-
-     def _evaluate_predictions_on_coco(self, coco_gt, coco_results):
-         """
-         Evaluate the coco results using COCOEval API.
-         """
-         assert len(coco_results) > 0
-
-         coco_dt = coco_gt.loadRes(coco_results)
-
-         # Only bbox is supported for now
-         coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox")
-
-         coco_eval.evaluate()
-         coco_eval.accumulate()
-         coco_eval.summarize()
-
-         return coco_eval
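
To make the box handling above concrete, a small sketch using the static helpers of the RotatedCOCOeval class defined in this file; the (cx, cy, w, h, angle) layout is the XYWHA_ABS convention that RotatedBoxes expects:

import numpy as np

# One rotated box; five columns mark it as rotated rather than axis-aligned.
boxes = np.array([[50.0, 50.0, 20.0, 10.0, 30.0]], dtype=np.float32)
assert RotatedCOCOeval.is_rotated(boxes)
box_tensor = RotatedCOCOeval.boxlist_to_tensor(boxes, output_box_dim=5)
assert box_tensor.shape == (1, 5)  # float32 tensor, one box, five coordinates
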
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/gqa/gqa_loader.py DELETED
@@ -1,277 +0,0 @@
- # --------------------------------------------------------
- # OpenVQA
- # Written by Yuhao Cui https://github.com/cuiyuhao1996
- # --------------------------------------------------------
-
- import numpy as np
- import glob, json, re, en_vectors_web_lg
- from openvqa.core.base_dataset import BaseDataSet
- from openvqa.utils.ans_punct import prep_ans
-
-
- class DataSet(BaseDataSet):
-     def __init__(self, __C):
-         super(DataSet, self).__init__()
-         self.__C = __C
-
-         # --------------------------
-         # ---- Raw data loading ----
-         # --------------------------
-
-         ques_dict_preread = {
-             'train': json.load(open(__C.RAW_PATH[__C.DATASET]['train'], 'r')),
-             'val': json.load(open(__C.RAW_PATH[__C.DATASET]['val'], 'r')),
-             'testdev': json.load(open(__C.RAW_PATH[__C.DATASET]['testdev'], 'r')),
-             'test': json.load(open(__C.RAW_PATH[__C.DATASET]['test'], 'r')),
-         }
-
-         # Loading all image paths
-         frcn_feat_path_list = glob.glob(__C.FEATS_PATH[__C.DATASET]['default-frcn'] + '/*.npz')
-         grid_feat_path_list = glob.glob(__C.FEATS_PATH[__C.DATASET]['default-grid'] + '/*.npz')
-
-         # Loading question word list
-         # stat_ques_dict = {
-         #     **ques_dict_preread['train'],
-         #     **ques_dict_preread['val'],
-         #     **ques_dict_preread['testdev'],
-         #     **ques_dict_preread['test'],
-         # }
-
-         # Loading answer word list
-         # stat_ans_dict = {
-         #     **ques_dict_preread['train'],
-         #     **ques_dict_preread['val'],
-         #     **ques_dict_preread['testdev'],
-         # }
-
-         # Loading question and answer list
-         self.ques_dict = {}
-         split_list = __C.SPLIT[__C.RUN_MODE].split('+')
-         for split in split_list:
-             if split in ques_dict_preread:
-                 self.ques_dict = {
-                     **self.ques_dict,
-                     **ques_dict_preread[split],
-                 }
-             else:
-                 self.ques_dict = {
-                     **self.ques_dict,
-                     **json.load(open(__C.RAW_PATH[__C.DATASET][split], 'r')),
-                 }
-
-         # Define run data size
-         self.data_size = self.ques_dict.__len__()
-         print(' ========== Dataset size:', self.data_size)
-
-
-         # ------------------------
-         # ---- Data statistic ----
-         # ------------------------
-
-         # {image id} -> {image feature absolutely path}
-         self.iid_to_frcn_feat_path = self.img_feat_path_load(frcn_feat_path_list)
-         self.iid_to_grid_feat_path = self.img_feat_path_load(grid_feat_path_list)
-
-         # Loading dict: question dict -> question list
-         self.qid_list = list(self.ques_dict.keys())
-
-         # Tokenize
-         self.token_to_ix, self.pretrained_emb, max_token = self.tokenize('openvqa/datasets/gqa/dicts.json', __C.USE_GLOVE)
-         self.token_size = self.token_to_ix.__len__()
-         print(' ========== Question token vocab size:', self.token_size)
-
-         self.max_token = -1
-         if self.max_token == -1:
-             self.max_token = max_token
-         print('Max token length:', max_token, 'Trimmed to:', self.max_token)
-
-         # Answers statistic
-         self.ans_to_ix, self.ix_to_ans = self.ans_stat('openvqa/datasets/gqa/dicts.json')
-         self.ans_size = self.ans_to_ix.__len__()
-         print(' ========== Answer token vocab size:', self.ans_size)
-         print('Finished!')
-         print('')
-
-
-
-     def img_feat_path_load(self, path_list):
-         iid_to_path = {}
-
-         for ix, path in enumerate(path_list):
-             iid = path.split('/')[-1].split('.')[0]
-             iid_to_path[iid] = path
-
-         return iid_to_path
-
-
-     # def tokenize(self, stat_ques_dict, use_glove):
-     #     token_to_ix = {
-     #         'PAD': 0,
-     #         'UNK': 1,
-     #         'CLS': 2,
-     #     }
-     #
-     #     spacy_tool = None
-     #     pretrained_emb = []
-     #     if use_glove:
-     #         spacy_tool = en_vectors_web_lg.load()
-     #         pretrained_emb.append(spacy_tool('PAD').vector)
-     #         pretrained_emb.append(spacy_tool('UNK').vector)
-     #         pretrained_emb.append(spacy_tool('CLS').vector)
-     #
-     #     max_token = 0
-     #     for qid in stat_ques_dict:
-     #         ques = stat_ques_dict[qid]['question']
-     #         words = re.sub(
-     #             r"([.,'!?\"()*#:;])",
-     #             '',
-     #             ques.lower()
-     #         ).replace('-', ' ').replace('/', ' ').split()
-     #
-     #         if len(words) > max_token:
-     #             max_token = len(words)
-     #
-     #         for word in words:
-     #             if word not in token_to_ix:
-     #                 token_to_ix[word] = len(token_to_ix)
-     #                 if use_glove:
-     #                     pretrained_emb.append(spacy_tool(word).vector)
-     #
-     #     pretrained_emb = np.array(pretrained_emb)
-     #
-     #     return token_to_ix, pretrained_emb, max_token
-     #
-     #
-     # def ans_stat(self, stat_ans_dict):
-     #     ans_to_ix = {}
-     #     ix_to_ans = {}
-     #
-     #     for qid in stat_ans_dict:
-     #         ans = stat_ans_dict[qid]['answer']
-     #         ans = prep_ans(ans)
-     #
-     #         if ans not in ans_to_ix:
-     #             ix_to_ans[ans_to_ix.__len__()] = ans
-     #             ans_to_ix[ans] = ans_to_ix.__len__()
-     #
-     #     return ans_to_ix, ix_to_ans
-
-
-     def tokenize(self, json_file, use_glove):
-         token_to_ix, max_token = json.load(open(json_file, 'r'))[2:]
-         spacy_tool = None
-         if use_glove:
-             spacy_tool = en_vectors_web_lg.load()
-
-         pretrained_emb = []
-         for word in token_to_ix:
-             if use_glove:
-                 pretrained_emb.append(spacy_tool(word).vector)
-         pretrained_emb = np.array(pretrained_emb)
-
-         return token_to_ix, pretrained_emb, max_token
-
-
-     def ans_stat(self, json_file):
-         ans_to_ix, ix_to_ans = json.load(open(json_file, 'r'))[:2]
-
-         return ans_to_ix, ix_to_ans
-
-
-     # ----------------------------------------------
-     # ---- Real-Time Processing Implementations ----
-     # ----------------------------------------------
-
-     def load_ques_ans(self, idx):
-
-         qid = self.qid_list[idx]
-         iid = self.ques_dict[qid]['imageId']
-
-         ques = self.ques_dict[qid]['question']
-         ques_ix_iter = self.proc_ques(ques, self.token_to_ix, max_token=self.max_token)
-         ans_iter = np.zeros(1)
-
-         if self.__C.RUN_MODE in ['train']:
-             # process answers
-             ans = self.ques_dict[qid]['answer']
-             ans_iter = self.proc_ans(ans, self.ans_to_ix)
-
-         return ques_ix_iter, ans_iter, iid
-
-
-     def load_img_feats(self, idx, iid):
-         frcn_feat = np.load(self.iid_to_frcn_feat_path[iid])
-         frcn_feat_iter = self.proc_img_feat(frcn_feat['x'], img_feat_pad_size=self.__C.FEAT_SIZE['gqa']['FRCN_FEAT_SIZE'][0])
-
-         grid_feat = np.load(self.iid_to_grid_feat_path[iid])
-         grid_feat_iter = grid_feat['x']
-
-         bbox_feat_iter = self.proc_img_feat(
-             self.proc_bbox_feat(
-                 frcn_feat['bbox'],
-                 (frcn_feat['height'], frcn_feat['width'])
-             ),
-             img_feat_pad_size=self.__C.FEAT_SIZE['gqa']['BBOX_FEAT_SIZE'][0]
-         )
-
-         return frcn_feat_iter, grid_feat_iter, bbox_feat_iter
-
-
-
-     # ------------------------------------
-     # ---- Real-Time Processing Utils ----
-     # ------------------------------------
-
-     def proc_img_feat(self, img_feat, img_feat_pad_size):
-         if img_feat.shape[0] > img_feat_pad_size:
-             img_feat = img_feat[:img_feat_pad_size]
-
-         img_feat = np.pad(
-             img_feat,
-             ((0, img_feat_pad_size - img_feat.shape[0]), (0, 0)),
-             mode='constant',
-             constant_values=0
-         )
-
-         return img_feat
-
-
-     def proc_bbox_feat(self, bbox, img_shape):
-         bbox_feat = np.zeros((bbox.shape[0], 5), dtype=np.float32)
-
-         bbox_feat[:, 0] = bbox[:, 0] / float(img_shape[1])
-         bbox_feat[:, 1] = bbox[:, 1] / float(img_shape[0])
-         bbox_feat[:, 2] = bbox[:, 2] / float(img_shape[1])
-         bbox_feat[:, 3] = bbox[:, 3] / float(img_shape[0])
-         bbox_feat[:, 4] = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1]) / float(img_shape[0] * img_shape[1])
-
-         return bbox_feat
-
-
-     def proc_ques(self, ques, token_to_ix, max_token):
-         ques_ix = np.zeros(max_token, np.int64)
-
-         words = re.sub(
-             r"([.,'!?\"()*#:;])",
-             '',
-             ques.lower()
-         ).replace('-', ' ').replace('/', ' ').split()
-
-         for ix, word in enumerate(words):
-             if word in token_to_ix:
-                 ques_ix[ix] = token_to_ix[word]
-             else:
-                 ques_ix[ix] = token_to_ix['UNK']
-
-             if ix + 1 == max_token:
-                 break
-
-         return ques_ix
-
-
-     def proc_ans(self, ans, ans_to_ix):
-         ans_ix = np.zeros(1, np.int64)
-         ans = prep_ans(ans)
-         ans_ix[0] = ans_to_ix[ans]
-
-         return ans_ix
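
A worked sketch of the proc_bbox_feat normalization above (values chosen for illustration): each corner is divided by the image width or height, and the fifth feature is the box area relative to the image area.

import numpy as np

bbox = np.array([[60.0, 40.0, 360.0, 240.0]], dtype=np.float32)  # (x1, y1, x2, y2)
height, width = 400, 600
# proc_bbox_feat would produce:
# [60/600, 40/400, 360/600, 240/400, (300 * 200) / (400 * 600)]
# -> [0.1, 0.1, 0.6, 0.6, 0.25]
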
spaces/CVPR/LIVE/pybind11/tests/test_numpy_dtypes.py DELETED
@@ -1,312 +0,0 @@
- # -*- coding: utf-8 -*-
- import re
-
- import pytest
-
- import env  # noqa: F401
-
- from pybind11_tests import numpy_dtypes as m
-
- np = pytest.importorskip("numpy")
-
-
- @pytest.fixture(scope='module')
- def simple_dtype():
-     ld = np.dtype('longdouble')
-     return np.dtype({'names': ['bool_', 'uint_', 'float_', 'ldbl_'],
-                      'formats': ['?', 'u4', 'f4', 'f{}'.format(ld.itemsize)],
-                      'offsets': [0, 4, 8, (16 if ld.alignment > 4 else 12)]})
-
-
- @pytest.fixture(scope='module')
- def packed_dtype():
-     return np.dtype([('bool_', '?'), ('uint_', 'u4'), ('float_', 'f4'), ('ldbl_', 'g')])
-
-
- def dt_fmt():
-     from sys import byteorder
-     e = '<' if byteorder == 'little' else '>'
-     return ("{{'names':['bool_','uint_','float_','ldbl_'],"
-             " 'formats':['?','" + e + "u4','" + e + "f4','" + e + "f{}'],"
-             " 'offsets':[0,4,8,{}], 'itemsize':{}}}")
-
-
- def simple_dtype_fmt():
-     ld = np.dtype('longdouble')
-     simple_ld_off = 12 + 4 * (ld.alignment > 4)
-     return dt_fmt().format(ld.itemsize, simple_ld_off, simple_ld_off + ld.itemsize)
-
-
- def packed_dtype_fmt():
-     from sys import byteorder
-     return "[('bool_', '?'), ('uint_', '{e}u4'), ('float_', '{e}f4'), ('ldbl_', '{e}f{}')]".format(
-         np.dtype('longdouble').itemsize, e='<' if byteorder == 'little' else '>')
-
-
- def partial_ld_offset():
-     return 12 + 4 * (np.dtype('uint64').alignment > 4) + 8 + 8 * (
-         np.dtype('longdouble').alignment > 8)
-
-
- def partial_dtype_fmt():
-     ld = np.dtype('longdouble')
-     partial_ld_off = partial_ld_offset()
-     return dt_fmt().format(ld.itemsize, partial_ld_off, partial_ld_off + ld.itemsize)
-
-
- def partial_nested_fmt():
-     ld = np.dtype('longdouble')
-     partial_nested_off = 8 + 8 * (ld.alignment > 8)
-     partial_ld_off = partial_ld_offset()
-     partial_nested_size = partial_nested_off * 2 + partial_ld_off + ld.itemsize
-     return "{{'names':['a'], 'formats':[{}], 'offsets':[{}], 'itemsize':{}}}".format(
-         partial_dtype_fmt(), partial_nested_off, partial_nested_size)
-
-
- def assert_equal(actual, expected_data, expected_dtype):
-     np.testing.assert_equal(actual, np.array(expected_data, dtype=expected_dtype))
-
-
- def test_format_descriptors():
-     with pytest.raises(RuntimeError) as excinfo:
-         m.get_format_unbound()
-     assert re.match('^NumPy type info missing for .*UnboundStruct.*$', str(excinfo.value))
-
-     ld = np.dtype('longdouble')
-     ldbl_fmt = ('4x' if ld.alignment > 4 else '') + ld.char
-     ss_fmt = "^T{?:bool_:3xI:uint_:f:float_:" + ldbl_fmt + ":ldbl_:}"
-     dbl = np.dtype('double')
-     partial_fmt = ("^T{?:bool_:3xI:uint_:f:float_:" +
-                    str(4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8)) +
-                    "xg:ldbl_:}")
-     nested_extra = str(max(8, ld.alignment))
-     assert m.print_format_descriptors() == [
-         ss_fmt,
-         "^T{?:bool_:I:uint_:f:float_:g:ldbl_:}",
-         "^T{" + ss_fmt + ":a:^T{?:bool_:I:uint_:f:float_:g:ldbl_:}:b:}",
-         partial_fmt,
-         "^T{" + nested_extra + "x" + partial_fmt + ":a:" + nested_extra + "x}",
-         "^T{3s:a:3s:b:}",
-         "^T{(3)4s:a:(2)i:b:(3)B:c:1x(4, 2)f:d:}",
-         '^T{q:e1:B:e2:}',
-         '^T{Zf:cflt:Zd:cdbl:}'
-     ]
-
-
- def test_dtype(simple_dtype):
-     from sys import byteorder
-     e = '<' if byteorder == 'little' else '>'
-
-     assert m.print_dtypes() == [
-         simple_dtype_fmt(),
-         packed_dtype_fmt(),
-         "[('a', {}), ('b', {})]".format(simple_dtype_fmt(), packed_dtype_fmt()),
-         partial_dtype_fmt(),
-         partial_nested_fmt(),
-         "[('a', 'S3'), ('b', 'S3')]",
-         ("{{'names':['a','b','c','d'], " +
-          "'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('" + e + "f4', (4, 2))], " +
-          "'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e),
-         "[('e1', '" + e + "i8'), ('e2', 'u1')]",
-         "[('x', 'i1'), ('y', '" + e + "u8')]",
-         "[('cflt', '" + e + "c8'), ('cdbl', '" + e + "c16')]"
-     ]
-
-     d1 = np.dtype({'names': ['a', 'b'], 'formats': ['int32', 'float64'],
-                    'offsets': [1, 10], 'itemsize': 20})
-     d2 = np.dtype([('a', 'i4'), ('b', 'f4')])
-     assert m.test_dtype_ctors() == [np.dtype('int32'), np.dtype('float64'),
-                                     np.dtype('bool'), d1, d1, np.dtype('uint32'), d2]
-
-     assert m.test_dtype_methods() == [np.dtype('int32'), simple_dtype, False, True,
-                                       np.dtype('int32').itemsize, simple_dtype.itemsize]
-
-     assert m.trailing_padding_dtype() == m.buffer_to_dtype(np.zeros(1, m.trailing_padding_dtype()))
-
-
- def test_recarray(simple_dtype, packed_dtype):
-     elements = [(False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)]
-
-     for func, dtype in [(m.create_rec_simple, simple_dtype), (m.create_rec_packed, packed_dtype)]:
-         arr = func(0)
-         assert arr.dtype == dtype
-         assert_equal(arr, [], simple_dtype)
-         assert_equal(arr, [], packed_dtype)
-
-         arr = func(3)
-         assert arr.dtype == dtype
-         assert_equal(arr, elements, simple_dtype)
-         assert_equal(arr, elements, packed_dtype)
-
-         if dtype == simple_dtype:
-             assert m.print_rec_simple(arr) == [
-                 "s:0,0,0,-0",
-                 "s:1,1,1.5,-2.5",
-                 "s:0,2,3,-5"
-             ]
-         else:
-             assert m.print_rec_packed(arr) == [
-                 "p:0,0,0,-0",
-                 "p:1,1,1.5,-2.5",
-                 "p:0,2,3,-5"
-             ]
-
-     nested_dtype = np.dtype([('a', simple_dtype), ('b', packed_dtype)])
-
-     arr = m.create_rec_nested(0)
-     assert arr.dtype == nested_dtype
-     assert_equal(arr, [], nested_dtype)
-
-     arr = m.create_rec_nested(3)
-     assert arr.dtype == nested_dtype
-     assert_equal(arr, [((False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5)),
-                        ((True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)),
-                        ((False, 2, 3.0, -5.0), (True, 3, 4.5, -7.5))], nested_dtype)
-     assert m.print_rec_nested(arr) == [
-         "n:a=s:0,0,0,-0;b=p:1,1,1.5,-2.5",
-         "n:a=s:1,1,1.5,-2.5;b=p:0,2,3,-5",
-         "n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5"
-     ]
-
-     arr = m.create_rec_partial(3)
-     assert str(arr.dtype) == partial_dtype_fmt()
-     partial_dtype = arr.dtype
-     assert '' not in arr.dtype.fields
-     assert partial_dtype.itemsize > simple_dtype.itemsize
-     assert_equal(arr, elements, simple_dtype)
-     assert_equal(arr, elements, packed_dtype)
-
-     arr = m.create_rec_partial_nested(3)
-     assert str(arr.dtype) == partial_nested_fmt()
-     assert '' not in arr.dtype.fields
-     assert '' not in arr.dtype.fields['a'][0].fields
-     assert arr.dtype.itemsize > partial_dtype.itemsize
-     np.testing.assert_equal(arr['a'], m.create_rec_partial(3))
-
-
- def test_array_constructors():
-     data = np.arange(1, 7, dtype='int32')
-     for i in range(8):
-         np.testing.assert_array_equal(m.test_array_ctors(10 + i), data.reshape((3, 2)))
-         np.testing.assert_array_equal(m.test_array_ctors(20 + i), data.reshape((3, 2)))
-     for i in range(5):
-         np.testing.assert_array_equal(m.test_array_ctors(30 + i), data)
-         np.testing.assert_array_equal(m.test_array_ctors(40 + i), data)
-
-
- def test_string_array():
-     arr = m.create_string_array(True)
-     assert str(arr.dtype) == "[('a', 'S3'), ('b', 'S3')]"
-     assert m.print_string_array(arr) == [
-         "a='',b=''",
-         "a='a',b='a'",
-         "a='ab',b='ab'",
-         "a='abc',b='abc'"
-     ]
-     dtype = arr.dtype
-     assert arr['a'].tolist() == [b'', b'a', b'ab', b'abc']
-     assert arr['b'].tolist() == [b'', b'a', b'ab', b'abc']
-     arr = m.create_string_array(False)
-     assert dtype == arr.dtype
-
-
- def test_array_array():
-     from sys import byteorder
-     e = '<' if byteorder == 'little' else '>'
-
-     arr = m.create_array_array(3)
-     assert str(arr.dtype) == (
-         "{{'names':['a','b','c','d'], " +
-         "'formats':[('S4', (3,)),('" + e + "i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], " +
-         "'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e)
-     assert m.print_array_array(arr) == [
-         "a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1}," +
-         "c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}",
-         "a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001}," +
-         "c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}",
-         "a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001}," +
-         "c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}",
-     ]
-     assert arr['a'].tolist() == [[b'ABCD', b'KLMN', b'UVWX'],
-                                  [b'WXYZ', b'GHIJ', b'QRST'],
-                                  [b'STUV', b'CDEF', b'MNOP']]
-     assert arr['b'].tolist() == [[0, 1], [1000, 1001], [2000, 2001]]
-     assert m.create_array_array(0).dtype == arr.dtype
-
-
- def test_enum_array():
-     from sys import byteorder
-     e = '<' if byteorder == 'little' else '>'
-
-     arr = m.create_enum_array(3)
-     dtype = arr.dtype
-     assert dtype == np.dtype([('e1', e + 'i8'), ('e2', 'u1')])
-     assert m.print_enum_array(arr) == [
-         "e1=A,e2=X",
-         "e1=B,e2=Y",
-         "e1=A,e2=X"
-     ]
-     assert arr['e1'].tolist() == [-1, 1, -1]
-     assert arr['e2'].tolist() == [1, 2, 1]
-     assert m.create_enum_array(0).dtype == dtype
-
-
- def test_complex_array():
-     from sys import byteorder
-     e = '<' if byteorder == 'little' else '>'
-
-     arr = m.create_complex_array(3)
-     dtype = arr.dtype
-     assert dtype == np.dtype([('cflt', e + 'c8'), ('cdbl', e + 'c16')])
-     assert m.print_complex_array(arr) == [
-         "c:(0,0.25),(0.5,0.75)",
-         "c:(1,1.25),(1.5,1.75)",
-         "c:(2,2.25),(2.5,2.75)"
-     ]
-     assert arr['cflt'].tolist() == [0.0 + 0.25j, 1.0 + 1.25j, 2.0 + 2.25j]
-     assert arr['cdbl'].tolist() == [0.5 + 0.75j, 1.5 + 1.75j, 2.5 + 2.75j]
268
- assert m.create_complex_array(0).dtype == dtype
269
-
270
-
271
- def test_signature(doc):
272
- assert doc(m.create_rec_nested) == \
273
- "create_rec_nested(arg0: int) -> numpy.ndarray[NestedStruct]"
274
-
275
-
276
- def test_scalar_conversion():
277
- n = 3
278
- arrays = [m.create_rec_simple(n), m.create_rec_packed(n),
279
- m.create_rec_nested(n), m.create_enum_array(n)]
280
- funcs = [m.f_simple, m.f_packed, m.f_nested]
281
-
282
- for i, func in enumerate(funcs):
283
- for j, arr in enumerate(arrays):
284
- if i == j and i < 2:
285
- assert [func(arr[k]) for k in range(n)] == [k * 10 for k in range(n)]
286
- else:
287
- with pytest.raises(TypeError) as excinfo:
288
- func(arr[0])
289
- assert 'incompatible function arguments' in str(excinfo.value)
290
-
291
-
292
- def test_register_dtype():
293
- with pytest.raises(RuntimeError) as excinfo:
294
- m.register_dtype()
295
- assert 'dtype is already registered' in str(excinfo.value)
296
-
297
-
298
- @pytest.mark.xfail("env.PYPY")
299
- def test_str_leak():
300
- from sys import getrefcount
301
- fmt = "f4"
302
- pytest.gc_collect()
303
- start = getrefcount(fmt)
304
- d = m.dtype_wrapper(fmt)
305
- assert d is np.dtype("f4")
306
- del d
307
- pytest.gc_collect()
308
- assert getrefcount(fmt) == start
309
-
310
-
311
- def test_compare_buffer_info():
312
- assert all(m.compare_buffer_info())
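
For reference, the padding arithmetic used to build partial_fmt above can be checked directly in plain NumPy. The sketch below is illustrative only, not part of the test suite, and the printed value assumes a typical Linux/x86-64 build where double is 8-byte aligned and long double is 16-byte aligned:

import numpy as np

dbl = np.dtype('double')
ld = np.dtype('longdouble')
# 4 padding bytes if double needs more than 4-byte alignment, then the
# double itself, then 8 more padding bytes if long double needs more
# than 8-byte alignment
pad = 4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8)
print(pad)  # 20 on common x86-64 builds (4 + 8 + 8)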
 
spaces/CVPR/LIVE/thrust/thrust/cmake/FindTBB.cmake DELETED
@@ -1,440 +0,0 @@
- # - Find ThreadingBuildingBlocks include dirs and libraries
- # Use this module by invoking find_package with the form:
- #  find_package(TBB
- #    [REQUIRED]             # Fail with error if TBB is not found
- #    )                      #
- # Once done, this will define
- #
- #  TBB_FOUND - system has TBB
- #  TBB_INCLUDE_DIRS - the TBB include directories
- #  TBB_LIBRARIES - TBB libraries to be linked, doesn't include malloc or
- #                  malloc proxy
- #  TBB::tbb - imported target for the TBB library
- #
- #  TBB_VERSION - Product Version Number ("MAJOR.MINOR")
- #  TBB_VERSION_MAJOR - Major Product Version Number
- #  TBB_VERSION_MINOR - Minor Product Version Number
- #  TBB_INTERFACE_VERSION - Engineering Focused Version Number
- #  TBB_COMPATIBLE_INTERFACE_VERSION - The oldest major interface version
- #                                     still supported. This uses the engineering
- #                                     focused interface version numbers.
- #
- #  TBB_MALLOC_FOUND - system has TBB malloc library
- #  TBB_MALLOC_INCLUDE_DIRS - the TBB malloc include directories
- #  TBB_MALLOC_LIBRARIES - The TBB malloc libraries to be linked
- #  TBB::malloc - imported target for the TBB malloc library
- #
- #  TBB_MALLOC_PROXY_FOUND - system has TBB malloc proxy library
- #  TBB_MALLOC_PROXY_INCLUDE_DIRS - the TBB malloc proxy include directories
- #  TBB_MALLOC_PROXY_LIBRARIES - The TBB malloc proxy libraries to be linked
- #  TBB::malloc_proxy - imported target for the TBB malloc proxy library
- #
- #
- # This module reads hints about search locations from variables:
- #  ENV TBB_ARCH_PLATFORM - e.g. set it to "mic" for Xeon Phi builds
- #  ENV TBB_ROOT or just TBB_ROOT - root directory of tbb installation
- #  ENV TBB_BUILD_PREFIX - specifies the build prefix for user built tbb
- #                         libraries. Should be specified with ENV TBB_ROOT
- #                         and optionally...
- #  ENV TBB_BUILD_DIR - if build directory is different than ${TBB_ROOT}/build
- #
- #
- # Modified by Robert Maynard from the original OGRE source
- #
- #-------------------------------------------------------------------
- # This file is part of the CMake build system for OGRE
- #     (Object-oriented Graphics Rendering Engine)
- # For the latest info, see http://www.ogre3d.org/
- #
- # The contents of this file are placed in the public domain. Feel
- # free to make use of it in any way you like.
- #-------------------------------------------------------------------
- #
- #=============================================================================
- # Copyright 2010-2012 Kitware, Inc.
- # Copyright 2012      Rolf Eike Beer <[email protected]>
- #
- # Distributed under the OSI-approved BSD License (the "License");
- # see accompanying file Copyright.txt for details.
- #
- # This software is distributed WITHOUT ANY WARRANTY; without even the
- # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- # See the License for more information.
- #=============================================================================
- # (To distribute this file outside of CMake, substitute the full
- #  License text for the above reference.)
-
-
- #=============================================================================
- #  FindTBB helper functions and macros
- #
-
- #====================================================
- # Fix the library path in case it is a linker script
- #====================================================
- function(tbb_extract_real_library library real_library)
-   if(NOT UNIX OR NOT EXISTS ${library})
-     set(${real_library} "${library}" PARENT_SCOPE)
-     return()
-   endif()
-
-   #Read in the first 4 bytes and see if they are the ELF magic number
-   set(_elf_magic "7f454c46")
-   file(READ ${library} _hex_data OFFSET 0 LIMIT 4 HEX)
-   if(_hex_data STREQUAL _elf_magic)
-     #we have opened an ELF binary so this is what
-     #we should link to
-     set(${real_library} "${library}" PARENT_SCOPE)
-     return()
-   endif()
-
-   file(READ ${library} _data OFFSET 0 LIMIT 1024)
-   if("${_data}" MATCHES "INPUT \\(([^(]+)\\)")
-     #extract out the .so name from REGEX MATCH command
-     set(_proper_so_name "${CMAKE_MATCH_1}")
-
-     #construct path to the real .so which is presumed to be in the same directory
-     #as the input file
-     get_filename_component(_so_dir "${library}" DIRECTORY)
-     set(${real_library} "${_so_dir}/${_proper_so_name}" PARENT_SCOPE)
-   else()
-     #unable to determine what this library is so just hope everything works
-     #and pass it unmodified.
-     set(${real_library} "${library}" PARENT_SCOPE)
-   endif()
- endfunction()
-
- #===============================================
- # Do the final processing for the package find.
- #===============================================
- macro(findpkg_finish PREFIX TARGET_NAME)
-   if (${PREFIX}_INCLUDE_DIR AND ${PREFIX}_LIBRARY)
-     set(${PREFIX}_FOUND TRUE)
-     set (${PREFIX}_INCLUDE_DIRS ${${PREFIX}_INCLUDE_DIR})
-     set (${PREFIX}_LIBRARIES ${${PREFIX}_LIBRARY})
-   else ()
-     if (${PREFIX}_FIND_REQUIRED)
-       message(FATAL_ERROR "Required library ${PREFIX} not found.")
-     elseif (NOT ${PREFIX}_FIND_QUIETLY)
-       message("Library ${PREFIX} not found.")
-     endif()
-     return()
-   endif ()
-
-   if (NOT TARGET "TBB::${TARGET_NAME}")
-     if (${PREFIX}_LIBRARY_RELEASE)
-       tbb_extract_real_library(${${PREFIX}_LIBRARY_RELEASE} real_release)
-     endif ()
-     if (${PREFIX}_LIBRARY_DEBUG)
-       tbb_extract_real_library(${${PREFIX}_LIBRARY_DEBUG} real_debug)
-     endif ()
-     add_library(TBB::${TARGET_NAME} UNKNOWN IMPORTED)
-     set_target_properties(TBB::${TARGET_NAME} PROPERTIES
-       INTERFACE_INCLUDE_DIRECTORIES "${${PREFIX}_INCLUDE_DIR}")
-     if (${PREFIX}_LIBRARY_DEBUG AND ${PREFIX}_LIBRARY_RELEASE)
-       set_target_properties(TBB::${TARGET_NAME} PROPERTIES
-         IMPORTED_LOCATION "${real_release}"
-         IMPORTED_LOCATION_DEBUG "${real_debug}"
-         IMPORTED_LOCATION_RELEASE "${real_release}")
-     elseif (${PREFIX}_LIBRARY_RELEASE)
-       set_target_properties(TBB::${TARGET_NAME} PROPERTIES
-         IMPORTED_LOCATION "${real_release}")
-     elseif (${PREFIX}_LIBRARY_DEBUG)
-       set_target_properties(TBB::${TARGET_NAME} PROPERTIES
-         IMPORTED_LOCATION "${real_debug}")
-     endif ()
-   endif ()
-
-   #mark the following variables as internal variables
-   mark_as_advanced(${PREFIX}_INCLUDE_DIR
-                    ${PREFIX}_LIBRARY
-                    ${PREFIX}_LIBRARY_DEBUG
-                    ${PREFIX}_LIBRARY_RELEASE)
- endmacro()
-
- #===============================================
- # Generate debug names from given release names
- #===============================================
- macro(get_debug_names PREFIX)
-   foreach(i ${${PREFIX}})
-     set(${PREFIX}_DEBUG ${${PREFIX}_DEBUG} ${i}d ${i}D ${i}_d ${i}_D ${i}_debug ${i})
-   endforeach()
- endmacro()
-
- #===============================================
- # See if we have env vars to help us find tbb
- #===============================================
- macro(getenv_path VAR)
-   set(ENV_${VAR} $ENV{${VAR}})
-   # replace won't work if var is blank
-   if (ENV_${VAR})
-     string( REGEX REPLACE "\\\\" "/" ENV_${VAR} ${ENV_${VAR}} )
-   endif ()
- endmacro()
-
- #===============================================
- # Couple a set of release AND debug libraries
- #===============================================
- macro(make_library_set PREFIX)
-   if (${PREFIX}_RELEASE AND ${PREFIX}_DEBUG)
-     set(${PREFIX} optimized ${${PREFIX}_RELEASE} debug ${${PREFIX}_DEBUG})
-   elseif (${PREFIX}_RELEASE)
-     set(${PREFIX} ${${PREFIX}_RELEASE})
-   elseif (${PREFIX}_DEBUG)
-     set(${PREFIX} ${${PREFIX}_DEBUG})
-   endif ()
- endmacro()
-
-
- #=============================================================================
- #  Now to actually find TBB
- #
-
- # Get path, convert backslashes as ${ENV_${var}}
- getenv_path(TBB_ROOT)
-
- # initialize search paths
- set(TBB_PREFIX_PATH ${TBB_ROOT} ${ENV_TBB_ROOT})
- set(TBB_INC_SEARCH_PATH "")
- set(TBB_LIB_SEARCH_PATH "")
-
-
- # If user built from sources
- set(TBB_BUILD_PREFIX $ENV{TBB_BUILD_PREFIX})
- if (TBB_BUILD_PREFIX AND ENV_TBB_ROOT)
-   getenv_path(TBB_BUILD_DIR)
-   if (NOT ENV_TBB_BUILD_DIR)
-     set(ENV_TBB_BUILD_DIR ${ENV_TBB_ROOT}/build)
-   endif ()
-
-   # include directory under ${ENV_TBB_ROOT}/include
-   list(APPEND TBB_LIB_SEARCH_PATH
-     ${ENV_TBB_BUILD_DIR}/${TBB_BUILD_PREFIX}_release
-     ${ENV_TBB_BUILD_DIR}/${TBB_BUILD_PREFIX}_debug)
- endif ()
-
-
- # For Windows, let's assume that the user might be using the precompiled
- # TBB packages from the main website. These use a rather awkward directory
- # structure (at least for automatically finding the right files) depending
- # on platform and compiler, but we'll do our best to accommodate it.
- # Not adding the same effort for the precompiled linux builds, though. Those
- # have different versions for CC compiler versions and linux kernels which
- # will never adequately match the user's setup, so there is no feasible way
- # to detect the "best" version to use. The user will have to manually
- # select the right files. (Chances are the distributions are shipping their
- # custom version of tbb, anyway, so the problem is probably nonexistent.)
- if (WIN32 AND MSVC)
-   set(COMPILER_PREFIX "vc7.1")
-   if (MSVC_VERSION EQUAL 1400)
-     set(COMPILER_PREFIX "vc8")
-   elseif(MSVC_VERSION EQUAL 1500)
-     set(COMPILER_PREFIX "vc9")
-   elseif(MSVC_VERSION EQUAL 1600)
-     set(COMPILER_PREFIX "vc10")
-   elseif(MSVC_VERSION EQUAL 1700)
-     set(COMPILER_PREFIX "vc11")
-   elseif(MSVC_VERSION EQUAL 1800)
-     set(COMPILER_PREFIX "vc12")
-   elseif(MSVC_VERSION GREATER_EQUAL 1900 AND MSVC_VERSION LESS_EQUAL 1925)
-     # 1900-1925 actually spans three Visual Studio versions:
-     #   1900      = VS 14.0 (v140 toolset) a.k.a. MSVC 2015
-     #   1910-1919 = VS 15.0 (v141 toolset) a.k.a. MSVC 2017
-     #   1920-1929 = VS 16.0 (v142 toolset) a.k.a. MSVC 2019
-     #
-     # But these are binary compatible and TBB's open source distribution only
-     # ships a single vs14 lib (as of 2020.0)
-     set(COMPILER_PREFIX "vc14")
-   else()
-     # The next poor soul who finds themselves having to decode visual studio
-     # version conventions may find these helpful:
-     # - https://cmake.org/cmake/help/latest/variable/MSVC_VERSION.html
-     # - https://en.wikipedia.org/wiki/Microsoft_Visual_C%2B%2B#Internal_version_numbering
-     message(AUTHOR_WARNING
-       "Unrecognized MSVC version. Please update FindTBB.cmake. "
-       "Some TBB_* values may need to be set manually."
-     )
-   endif ()
-
-   # for each prefix path, add ia32/64\${COMPILER_PREFIX}\lib to the lib search path
-   foreach (dir IN LISTS TBB_PREFIX_PATH)
-     if (CMAKE_CL_64)
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/ia64/${COMPILER_PREFIX}/lib)
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/ia64/${COMPILER_PREFIX})
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/intel64/${COMPILER_PREFIX}/lib)
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/intel64/${COMPILER_PREFIX})
-     else ()
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/ia32/${COMPILER_PREFIX}/lib)
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/ia32/${COMPILER_PREFIX})
-     endif ()
-   endforeach ()
- endif ()
-
- # For OS X binary distribution, choose libc++ based libraries for Mavericks (10.9)
- # and above and AppleClang
- if (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND
-     NOT CMAKE_SYSTEM_VERSION VERSION_LESS 13.0)
-   set (USE_LIBCXX OFF)
-   cmake_policy(GET CMP0025 POLICY_VAR)
-
-   if (POLICY_VAR STREQUAL "NEW")
-     if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
-       set (USE_LIBCXX ON)
-     endif ()
-   else ()
-     if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
-       set (USE_LIBCXX ON)
-     endif ()
-   endif ()
-
-   if (USE_LIBCXX)
-     foreach (dir IN LISTS TBB_PREFIX_PATH)
-       list (APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/libc++ ${dir}/libc++/lib)
-     endforeach ()
-   endif ()
- endif ()
-
- # check compiler ABI
- if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-   set(COMPILER_PREFIX)
-   if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.7)
-     list(APPEND COMPILER_PREFIX "gcc4.7")
-   endif()
-   if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.4)
-     list(APPEND COMPILER_PREFIX "gcc4.4")
-   endif()
-   list(APPEND COMPILER_PREFIX "gcc4.1")
- elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
-   set(COMPILER_PREFIX)
-   if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.6)
-     list(APPEND COMPILER_PREFIX "gcc4.7")
-   endif()
-   list(APPEND COMPILER_PREFIX "gcc4.4")
- else() # Assume compatibility with 4.4 for other compilers
-   list(APPEND COMPILER_PREFIX "gcc4.4")
- endif ()
-
- # if platform architecture is explicitly specified
- set(TBB_ARCH_PLATFORM $ENV{TBB_ARCH_PLATFORM})
- if (TBB_ARCH_PLATFORM)
-   foreach (dir IN LISTS TBB_PREFIX_PATH)
-     list(APPEND TBB_LIB_SEARCH_PATH ${dir}/${TBB_ARCH_PLATFORM}/lib)
-     list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/${TBB_ARCH_PLATFORM})
-   endforeach ()
- endif ()
-
- foreach (dir IN LISTS TBB_PREFIX_PATH)
-   foreach (prefix IN LISTS COMPILER_PREFIX)
-     if (CMAKE_SIZEOF_VOID_P EQUAL 8)
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/intel64)
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/intel64/${prefix})
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/intel64/lib)
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/intel64/${prefix}/lib)
-     else ()
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/ia32)
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib/ia32/${prefix})
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/ia32/lib)
-       list(APPEND TBB_LIB_SEARCH_PATH ${dir}/ia32/${prefix}/lib)
-     endif ()
-   endforeach()
- endforeach ()
-
- # add general search paths
- foreach (dir IN LISTS TBB_PREFIX_PATH)
-   list(APPEND TBB_LIB_SEARCH_PATH ${dir}/lib ${dir}/Lib ${dir}/lib/tbb
-     ${dir}/Libs)
-   list(APPEND TBB_INC_SEARCH_PATH ${dir}/include ${dir}/Include
-     ${dir}/include/tbb)
- endforeach ()
-
- set(TBB_LIBRARY_NAMES tbb)
- get_debug_names(TBB_LIBRARY_NAMES)
-
-
- find_path(TBB_INCLUDE_DIR
-   NAMES tbb/tbb.h
-   PATHS ${TBB_INC_SEARCH_PATH})
-
- find_library(TBB_LIBRARY_RELEASE
-   NAMES ${TBB_LIBRARY_NAMES}
-   PATHS ${TBB_LIB_SEARCH_PATH})
- find_library(TBB_LIBRARY_DEBUG
-   NAMES ${TBB_LIBRARY_NAMES_DEBUG}
-   PATHS ${TBB_LIB_SEARCH_PATH})
- make_library_set(TBB_LIBRARY)
-
- findpkg_finish(TBB tbb)
-
- #if we haven't found TBB no point on going any further
- if (NOT TBB_FOUND)
-   return()
- endif ()
-
- #=============================================================================
- # Look for TBB's malloc package
- set(TBB_MALLOC_LIBRARY_NAMES tbbmalloc)
- get_debug_names(TBB_MALLOC_LIBRARY_NAMES)
-
- find_path(TBB_MALLOC_INCLUDE_DIR
-   NAMES tbb/tbb.h
-   PATHS ${TBB_INC_SEARCH_PATH})
-
- find_library(TBB_MALLOC_LIBRARY_RELEASE
-   NAMES ${TBB_MALLOC_LIBRARY_NAMES}
-   PATHS ${TBB_LIB_SEARCH_PATH})
- find_library(TBB_MALLOC_LIBRARY_DEBUG
-   NAMES ${TBB_MALLOC_LIBRARY_NAMES_DEBUG}
-   PATHS ${TBB_LIB_SEARCH_PATH})
- make_library_set(TBB_MALLOC_LIBRARY)
-
- findpkg_finish(TBB_MALLOC tbbmalloc)
-
- #=============================================================================
- # Look for TBB's malloc proxy package
- set(TBB_MALLOC_PROXY_LIBRARY_NAMES tbbmalloc_proxy)
- get_debug_names(TBB_MALLOC_PROXY_LIBRARY_NAMES)
-
- find_path(TBB_MALLOC_PROXY_INCLUDE_DIR
-   NAMES tbb/tbbmalloc_proxy.h
-   PATHS ${TBB_INC_SEARCH_PATH})
-
- find_library(TBB_MALLOC_PROXY_LIBRARY_RELEASE
-   NAMES ${TBB_MALLOC_PROXY_LIBRARY_NAMES}
-   PATHS ${TBB_LIB_SEARCH_PATH})
- find_library(TBB_MALLOC_PROXY_LIBRARY_DEBUG
-   NAMES ${TBB_MALLOC_PROXY_LIBRARY_NAMES_DEBUG}
-   PATHS ${TBB_LIB_SEARCH_PATH})
- make_library_set(TBB_MALLOC_PROXY_LIBRARY)
-
- findpkg_finish(TBB_MALLOC_PROXY tbbmalloc_proxy)
-
-
- #=============================================================================
- #parse all the version numbers from tbb
- if(NOT TBB_VERSION)
-
-   #only read the start of the file
-   file(STRINGS
-     "${TBB_INCLUDE_DIR}/tbb/tbb_stddef.h"
-     TBB_VERSION_CONTENTS
-     REGEX "VERSION")
-
-   string(REGEX REPLACE
-     ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1"
-     TBB_VERSION_MAJOR "${TBB_VERSION_CONTENTS}")
-
-   string(REGEX REPLACE
-     ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1"
-     TBB_VERSION_MINOR "${TBB_VERSION_CONTENTS}")
-
-   string(REGEX REPLACE
-     ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1"
-     TBB_INTERFACE_VERSION "${TBB_VERSION_CONTENTS}")
-
-   string(REGEX REPLACE
-     ".*#define TBB_COMPATIBLE_INTERFACE_VERSION ([0-9]+).*" "\\1"
-     TBB_COMPATIBLE_INTERFACE_VERSION "${TBB_VERSION_CONTENTS}")
-
-   set(TBB_VERSION "${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}")
-
- endif()
 
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/per_device_resource.h DELETED
@@ -1,22 +0,0 @@
- /*
-  *  Copyright 2018 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system has no special per device resource functions
-
 
spaces/CVPR/ml-talking-face/client_rest.py DELETED
@@ -1,74 +0,0 @@
- import requests
- import json
- import base64
- import argparse
-
- VIDEO_WIDTH = 1080
- VIDEO_HEIGHT = 1920
- SPEAKER_ID = 0
-
- class RestAPIApplication:
-     def __init__(self, ip, port):
-
-         if port < 0:
-             self.post_request_addr = f"http://{ip}/register/"
-             self.post_headers = {"Content-Type": "application/json"}
-             self.generate_addr = (lambda id_: f'http://{ip}/generate/{id_}')
-         else:
-             self.post_request_addr = f"http://{ip}:{port}/register/"
-             self.post_headers = {"Content-Type": "application/json"}
-             self.generate_addr = (lambda id_: f'http://{ip}:{port}/generate/{id_}')
-
-     @staticmethod
-     def _get_json_request(text, lang, duration_rate, action, background_data=None, is_video_background=False):
-         request_form = dict()
-
-         request_form['text'] = text
-         request_form['speaker'] = SPEAKER_ID
-         request_form['width'] = VIDEO_WIDTH
-         request_form['height'] = VIDEO_HEIGHT
-
-         request_form['action'] = action
-
-         if background_data is not None:
-             background_base64 = base64.b64encode(background_data).decode("UTF-8")
-         else:
-             background_base64 = ""
-
-         request_form['background'] = background_base64
-         request_form['durationRate'] = duration_rate
-         request_form['isVideoBackground'] = is_video_background
-         request_form['lang'] = lang
-
-         request_as_json = json.dumps(request_form)
-         return request_as_json
-
-     @staticmethod
-     def _get_video_id(results):
-         return json.loads(bytes.decode(results.content))['id']
-
-     def get_video(self, text, lang, duration_rate, action, background_data=None, is_video_background=False):
-         request_json = self._get_json_request(text, lang, duration_rate, action, background_data, is_video_background)
-
-         # POST request with jsonified request
-         results = requests.post(self.post_request_addr, headers=self.post_headers, data=request_json)
-
-         # GET video with the given id
-         video_id = self._get_video_id(results)
-         video_results = requests.get(self.generate_addr(video_id))
-
-         return video_results.content
-
-
- def parse_args():
-     parser = argparse.ArgumentParser(
-         description='REST API interface for talking face generation submitted to CVPR2022')
-     parser.add_argument('-i', '--ip', dest='rest_ip', type=str, default="127.0.0.1", help="IP for REST API")
-     parser.add_argument('-p', '--port', dest='rest_port', type=int, default=8080, help="Port for REST API")
-     args = parser.parse_args()
-     return args
-
-
- if __name__ == '__main__':
-     args = parse_args()
-     rest_api_application = RestAPIApplication(args.rest_ip, args.rest_port)
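
As written, the __main__ block only constructs the client; a full round trip would look like the sketch below. The host, port, and the "default" action string are illustrative placeholders rather than values defined in this file, and the server is assumed to answer with MP4 bytes:

app = RestAPIApplication("127.0.0.1", 8080)
video_bytes = app.get_video("Hello, world!", "en", 1.0, "default")  # action value is a placeholder
with open("output.mp4", "wb") as f:
    f.write(video_bytes)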
 
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/modeling/prompt_encoder.py DELETED
@@ -1,214 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
-
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- import numpy as np
- import torch
- from torch import nn
-
- from typing import Any, Optional, Tuple, Type
-
- from .common import LayerNorm2d
-
-
- class PromptEncoder(nn.Module):
-     def __init__(
-         self,
-         embed_dim: int,
-         image_embedding_size: Tuple[int, int],
-         input_image_size: Tuple[int, int],
-         mask_in_chans: int,
-         activation: Type[nn.Module] = nn.GELU,
-     ) -> None:
-         """
-         Encodes prompts for input to SAM's mask decoder.
-
-         Arguments:
-           embed_dim (int): The prompts' embedding dimension
-           image_embedding_size (tuple(int, int)): The spatial size of the
-             image embedding, as (H, W).
-           input_image_size (int): The padded size of the image as input
-             to the image encoder, as (H, W).
-           mask_in_chans (int): The number of hidden channels used for
-             encoding input masks.
-           activation (nn.Module): The activation to use when encoding
-             input masks.
-         """
-         super().__init__()
-         self.embed_dim = embed_dim
-         self.input_image_size = input_image_size
-         self.image_embedding_size = image_embedding_size
-         self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
-
-         self.num_point_embeddings: int = 4  # pos/neg point + 2 box corners
-         point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
-         self.point_embeddings = nn.ModuleList(point_embeddings)
-         self.not_a_point_embed = nn.Embedding(1, embed_dim)
-
-         self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
-         self.mask_downscaling = nn.Sequential(
-             nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
-             LayerNorm2d(mask_in_chans // 4),
-             activation(),
-             nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
-             LayerNorm2d(mask_in_chans),
-             activation(),
-             nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
-         )
-         self.no_mask_embed = nn.Embedding(1, embed_dim)
-
-     def get_dense_pe(self) -> torch.Tensor:
-         """
-         Returns the positional encoding used to encode point prompts,
-         applied to a dense set of points the shape of the image encoding.
-
-         Returns:
-           torch.Tensor: Positional encoding with shape
-             1x(embed_dim)x(embedding_h)x(embedding_w)
-         """
-         return self.pe_layer(self.image_embedding_size).unsqueeze(0)
-
-     def _embed_points(
-         self,
-         points: torch.Tensor,
-         labels: torch.Tensor,
-         pad: bool,
-     ) -> torch.Tensor:
-         """Embeds point prompts."""
-         points = points + 0.5  # Shift to center of pixel
-         if pad:
-             padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
-             padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
-             points = torch.cat([points, padding_point], dim=1)
-             labels = torch.cat([labels, padding_label], dim=1)
-         point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
-         point_embedding[labels == -1] = 0.0
-         point_embedding[labels == -1] += self.not_a_point_embed.weight
-         point_embedding[labels == 0] += self.point_embeddings[0].weight
-         point_embedding[labels == 1] += self.point_embeddings[1].weight
-         return point_embedding
-
-     def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
-         """Embeds box prompts."""
-         boxes = boxes + 0.5  # Shift to center of pixel
-         coords = boxes.reshape(-1, 2, 2)
-         corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
-         corner_embedding[:, 0, :] += self.point_embeddings[2].weight
-         corner_embedding[:, 1, :] += self.point_embeddings[3].weight
-         return corner_embedding
-
-     def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
-         """Embeds mask inputs."""
-         mask_embedding = self.mask_downscaling(masks)
-         return mask_embedding
-
-     def _get_batch_size(
-         self,
-         points: Optional[Tuple[torch.Tensor, torch.Tensor]],
-         boxes: Optional[torch.Tensor],
-         masks: Optional[torch.Tensor],
-     ) -> int:
-         """
-         Gets the batch size of the output given the batch size of the input prompts.
-         """
-         if points is not None:
-             return points[0].shape[0]
-         elif boxes is not None:
-             return boxes.shape[0]
-         elif masks is not None:
-             return masks.shape[0]
-         else:
-             return 1
-
-     def _get_device(self) -> torch.device:
-         return self.point_embeddings[0].weight.device
-
-     def forward(
-         self,
-         points: Optional[Tuple[torch.Tensor, torch.Tensor]],
-         boxes: Optional[torch.Tensor],
-         masks: Optional[torch.Tensor],
-     ) -> Tuple[torch.Tensor, torch.Tensor]:
-         """
-         Embeds different types of prompts, returning both sparse and dense
-         embeddings.
-
-         Arguments:
-           points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
-             and labels to embed.
-           boxes (torch.Tensor or none): boxes to embed
-           masks (torch.Tensor or none): masks to embed
-
-         Returns:
-           torch.Tensor: sparse embeddings for the points and boxes, with shape
-             BxNx(embed_dim), where N is determined by the number of input points
-             and boxes.
-           torch.Tensor: dense embeddings for the masks, in the shape
-             Bx(embed_dim)x(embed_H)x(embed_W)
-         """
-         bs = self._get_batch_size(points, boxes, masks)
-         sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
-         if points is not None:
-             coords, labels = points
-             point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
-             sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
-         if boxes is not None:
-             box_embeddings = self._embed_boxes(boxes)
-             sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
-
-         if masks is not None:
-             dense_embeddings = self._embed_masks(masks)
-         else:
-             dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
-                 bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
-             )
-
-         return sparse_embeddings, dense_embeddings
-
-
- class PositionEmbeddingRandom(nn.Module):
-     """
-     Positional encoding using random spatial frequencies.
-     """
-
-     def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
-         super().__init__()
-         if scale is None or scale <= 0.0:
-             scale = 1.0
-         self.register_buffer(
-             "positional_encoding_gaussian_matrix",
-             scale * torch.randn((2, num_pos_feats)),
-         )
-
-     def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
-         """Positionally encode points that are normalized to [0,1]."""
-         # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
-         coords = 2 * coords - 1
-         coords = coords @ self.positional_encoding_gaussian_matrix
-         coords = 2 * np.pi * coords
-         # outputs d_1 x ... x d_n x C shape
-         return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
-
-     def forward(self, size: Tuple[int, int]) -> torch.Tensor:
-         """Generate positional encoding for a grid of the specified size."""
-         h, w = size
-         device: Any = self.positional_encoding_gaussian_matrix.device
-         grid = torch.ones((h, w), device=device, dtype=torch.float32)
-         y_embed = grid.cumsum(dim=0) - 0.5
-         x_embed = grid.cumsum(dim=1) - 0.5
-         y_embed = y_embed / h
-         x_embed = x_embed / w
-
-         pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
-         return pe.permute(2, 0, 1)  # C x H x W
-
-     def forward_with_coords(
-         self, coords_input: torch.Tensor, image_size: Tuple[int, int]
-     ) -> torch.Tensor:
-         """Positionally encode points that are not normalized to [0,1]."""
-         coords = coords_input.clone()
-         coords[:, :, 0] = coords[:, :, 0] / image_size[1]
-         coords[:, :, 1] = coords[:, :, 1] / image_size[0]
-         return self._pe_encoding(coords.to(torch.float))  # B x N x C
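
A minimal sketch of driving PromptEncoder with a single foreground point follows. The sizes mirror SAM's usual configuration (1024x1024 padded input, 64x64 image embedding, 256-dim embeddings), but they are assumptions here, not values fixed by this file:

import torch

enc = PromptEncoder(embed_dim=256,
                    image_embedding_size=(64, 64),
                    input_image_size=(1024, 1024),
                    mask_in_chans=16)
points = torch.tensor([[[512.0, 512.0]]])  # (B=1, N=1, 2) pixel coordinates
labels = torch.tensor([[1]])               # 1 = foreground, 0 = background
sparse, dense = enc(points=(points, labels), boxes=None, masks=None)
print(sparse.shape, dense.shape)  # (1, 2, 256) after point padding, (1, 256, 64, 64)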
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/agent/agent.py DELETED
@@ -1,197 +0,0 @@
- from colorama import Fore, Style
-
- from autogpt.app import execute_command, get_command
- from autogpt.chat import chat_with_ai, create_chat_message
- from autogpt.config import Config
- from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
- from autogpt.json_utils.utilities import validate_json
- from autogpt.logs import logger, print_assistant_thoughts
- from autogpt.speech import say_text
- from autogpt.spinner import Spinner
- from autogpt.utils import clean_input
-
-
- class Agent:
-     """Agent class for interacting with Auto-GPT.
-
-     Attributes:
-         ai_name: The name of the agent.
-         memory: The memory object to use.
-         full_message_history: The full message history.
-         next_action_count: The number of actions to execute.
-         system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
-         Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals.
-
-         triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
-         Determine which next command to use, and respond using the format specified above:
-         The triggering prompt is not part of the system prompt because between the system prompt and the triggering
-         prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
-         SYSTEM PROMPT
-         CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
-         TRIGGERING PROMPT
-
-         The triggering prompt reminds the AI about its short term meta task (defining the next task)
-     """
-
-     def __init__(
-         self,
-         ai_name,
-         memory,
-         full_message_history,
-         next_action_count,
-         system_prompt,
-         triggering_prompt,
-     ):
-         self.ai_name = ai_name
-         self.memory = memory
-         self.full_message_history = full_message_history
-         self.next_action_count = next_action_count
-         self.system_prompt = system_prompt
-         self.triggering_prompt = triggering_prompt
-
-     def start_interaction_loop(self):
-         # Interaction Loop
-         cfg = Config()
-         loop_count = 0
-         command_name = None
-         arguments = None
-         user_input = ""
-
-         while True:
-             # Discontinue if continuous limit is reached
-             loop_count += 1
-             if (
-                 cfg.continuous_mode
-                 and cfg.continuous_limit > 0
-                 and loop_count > cfg.continuous_limit
-             ):
-                 logger.typewriter_log(
-                     "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
-                 )
-                 break
-
-             # Send message to AI, get response
-             with Spinner("Thinking... "):
-                 assistant_reply = chat_with_ai(
-                     self.system_prompt,
-                     self.triggering_prompt,
-                     self.full_message_history,
-                     self.memory,
-                     cfg.fast_token_limit,
-                 )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument
-
-             assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
-
-             # Print Assistant thoughts
-             if assistant_reply_json != {}:
-                 validate_json(assistant_reply_json, "llm_response_format_1")
-                 # Get command name and arguments
-                 try:
-                     print_assistant_thoughts(self.ai_name, assistant_reply_json)
-                     command_name, arguments = get_command(assistant_reply_json)
-                     # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
-                     if cfg.speak_mode:
-                         say_text(f"I want to execute {command_name}")
-                 except Exception as e:
-                     logger.error("Error: \n", str(e))
-
-             if not cfg.continuous_mode and self.next_action_count == 0:
-                 ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
-                 # Get key press: Prompt the user to press enter to continue or escape
-                 # to exit
-                 logger.typewriter_log(
-                     "NEXT ACTION: ",
-                     Fore.CYAN,
-                     f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
-                     f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
-                 )
-                 print(
-                     "Enter 'y' to authorise command, 'y -N' to run N continuous "
-                     "commands, 'n' to exit program, or enter feedback for "
-                     f"{self.ai_name}...",
-                     flush=True,
-                 )
-                 while True:
-                     console_input = clean_input(
-                         Fore.MAGENTA + "Input:" + Style.RESET_ALL
-                     )
-                     if console_input.lower().strip() == "y":
-                         user_input = "GENERATE NEXT COMMAND JSON"
-                         break
-                     elif console_input.lower().strip() == "":
-                         print("Invalid input format.")
-                         continue
-                     elif console_input.lower().startswith("y -"):
-                         try:
-                             self.next_action_count = abs(
-                                 int(console_input.split(" ")[1])
-                             )
-                             user_input = "GENERATE NEXT COMMAND JSON"
-                         except ValueError:
-                             print(
-                                 "Invalid input format. Please enter 'y -n' where n is"
-                                 " the number of continuous tasks."
-                             )
-                             continue
-                         break
-                     elif console_input.lower() == "n":
-                         user_input = "EXIT"
-                         break
-                     else:
-                         user_input = console_input
-                         command_name = "human_feedback"
-                         break
-
-                 if user_input == "GENERATE NEXT COMMAND JSON":
-                     logger.typewriter_log(
-                         "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
-                         Fore.MAGENTA,
-                         "",
-                     )
-                 elif user_input == "EXIT":
-                     print("Exiting...", flush=True)
-                     break
-             else:
-                 # Print command
-                 logger.typewriter_log(
-                     "NEXT ACTION: ",
-                     Fore.CYAN,
-                     f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
-                     f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
-                 )
-
-             # Execute command
-             if command_name is not None and command_name.lower().startswith("error"):
-                 result = (
-                     f"Command {command_name} threw the following error: {arguments}"
-                 )
-             elif command_name == "human_feedback":
-                 result = f"Human feedback: {user_input}"
-             else:
-                 result = (
-                     f"Command {command_name} returned: "
-                     f"{execute_command(command_name, arguments)}"
-                 )
-                 if self.next_action_count > 0:
-                     self.next_action_count -= 1
-
-             memory_to_add = (
-                 f"Assistant Reply: {assistant_reply} "
-                 f"\nResult: {result} "
-                 f"\nHuman Feedback: {user_input} "
-             )
-
-             self.memory.add(memory_to_add)
-
-             # Check if there's a result from the command append it to the message
-             # history
-             if result is not None:
-                 self.full_message_history.append(create_chat_message("system", result))
-                 logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
-             else:
-                 self.full_message_history.append(
-                     create_chat_message("system", "Unable to execute command")
-                 )
-                 logger.typewriter_log(
-                     "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
-                 )
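
The SYSTEM PROMPT / CONTEXTUAL INFORMATION / TRIGGERING PROMPT layering described in the Agent docstring corresponds roughly to a chat-completion message list like the sketch below; the strings are placeholders, and the real assembly happens inside chat_with_ai:

system_prompt = "You are ExampleGPT, an AI with the following goals: ..."  # placeholder
triggering_prompt = ("Determine which next command to use, "
                     "and respond using the format specified above:")
messages = [
    {"role": "system", "content": system_prompt},           # identity, description, goals
    {"role": "system", "content": "Relevant memory: ..."},  # contextual information
    {"role": "user", "content": triggering_prompt},         # short-term meta-task reminder
]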
 
spaces/CikeyQI/meme-api/meme_generator/memes/decent_kiss/__init__.py DELETED
@@ -1,18 +0,0 @@
- from pathlib import Path
- from typing import List
-
- from pil_utils import BuildImage
-
- from meme_generator import add_meme
-
- img_dir = Path(__file__).parent / "images"
-
-
- def decent_kiss(images: List[BuildImage], texts, args):
-     img = images[0].convert("RGBA").resize((589, 340), keep_ratio=True)
-     frame = BuildImage.open(img_dir / "0.png")
-     frame.paste(img, (0, 91), below=True)
-     return frame.save_jpg()
-
-
- add_meme("decent_kiss", decent_kiss, min_images=1, max_images=1, keywords=["像样的亲亲"])
 
spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/cppipc/prod_cons.h DELETED
@@ -1,433 +0,0 @@
- #pragma once
-
- #include <atomic>
- #include <utility>
- #include <cstring>
- #include <type_traits>
- #include <cstdint>
-
- #include "libipc/def.h"
-
- #include "libipc/platform/detail.h"
- #include "libipc/circ/elem_def.h"
- #include "libipc/utility/log.h"
- #include "libipc/utility/utility.h"
-
- namespace ipc {
-
- ////////////////////////////////////////////////////////////////
- /// producer-consumer implementation
- ////////////////////////////////////////////////////////////////
-
- template <typename Flag>
- struct prod_cons_impl;
-
- template <>
- struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
-
-     template <std::size_t DataSize, std::size_t AlignSize>
-     struct elem_t {
-         std::aligned_storage_t<DataSize, AlignSize> data_ {};
-     };
-
-     alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
-     alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
-
-     constexpr circ::u2_t cursor() const noexcept {
-         return 0;
-     }
-
-     template <typename W, typename F, typename E>
-     bool push(W* /*wrapper*/, F&& f, E* elems) {
-         auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
-         if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
-             return false; // full
-         }
-         std::forward<F>(f)(&(elems[cur_wt].data_));
-         wt_.fetch_add(1, std::memory_order_release);
-         return true;
-     }
-
-     /**
-      * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
-      * So we could just disconnect all connections of receiver, and return false.
-     */
-     template <typename W, typename F, typename E>
-     bool force_push(W* wrapper, F&&, E*) {
-         wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
-         return false;
-     }
-
-     template <typename W, typename F, typename R, typename E>
-     bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
-         auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
-         if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
-             return false; // empty
-         }
-         std::forward<F>(f)(&(elems[cur_rd].data_));
-         std::forward<R>(out)(true);
-         rd_.fetch_add(1, std::memory_order_release);
-         return true;
-     }
- };
-
- template <>
- struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
-      : prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {
-
-     template <typename W, typename F, typename E>
-     bool force_push(W* wrapper, F&&, E*) {
-         wrapper->elems()->disconnect_receiver(1);
-         return false;
-     }
-
-     template <typename W, typename F, typename R,
-               template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
-     bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
-         byte_t buff[DS];
-         for (unsigned k = 0;;) {
-             auto cur_rd = rd_.load(std::memory_order_relaxed);
-             if (circ::index_of(cur_rd) ==
-                 circ::index_of(wt_.load(std::memory_order_acquire))) {
-                 return false; // empty
-             }
-             std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
-             if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
-                 std::forward<F>(f)(buff);
-                 std::forward<R>(out)(true);
-                 return true;
-             }
-             ipc::yield(k);
-         }
-     }
- };
-
- template <>
- struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
-      : prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {
-
-     using flag_t = std::uint64_t;
-
-     template <std::size_t DataSize, std::size_t AlignSize>
-     struct elem_t {
-         std::aligned_storage_t<DataSize, AlignSize> data_ {};
-         std::atomic<flag_t> f_ct_ { 0 }; // commit flag
-     };
-
-     alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
-
-     template <typename W, typename F, typename E>
-     bool push(W* /*wrapper*/, F&& f, E* elems) {
-         circ::u2_t cur_ct, nxt_ct;
-         for (unsigned k = 0;;) {
-             cur_ct = ct_.load(std::memory_order_relaxed);
-             if (circ::index_of(nxt_ct = cur_ct + 1) ==
-                 circ::index_of(rd_.load(std::memory_order_acquire))) {
-                 return false; // full
-             }
-             if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
-                 break;
-             }
-             ipc::yield(k);
-         }
-         auto* el = elems + circ::index_of(cur_ct);
-         std::forward<F>(f)(&(el->data_));
-         // set flag & try update wt
-         el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
-         while (1) {
-             auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
-             if (cur_ct != wt_.load(std::memory_order_relaxed)) {
-                 return true;
-             }
-             if ((~cac_ct) != cur_ct) {
-                 return true;
-             }
-             if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
-                 return true;
-             }
-             wt_.store(nxt_ct, std::memory_order_release);
-             cur_ct = nxt_ct;
-             nxt_ct = cur_ct + 1;
-             el = elems + circ::index_of(cur_ct);
-         }
-         return true;
-     }
-
-     template <typename W, typename F, typename E>
-     bool force_push(W* wrapper, F&&, E*) {
-         wrapper->elems()->disconnect_receiver(1);
-         return false;
-     }
-
-     template <typename W, typename F, typename R,
-               template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
-     bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
-         byte_t buff[DS];
-         for (unsigned k = 0;;) {
-             auto cur_rd = rd_.load(std::memory_order_relaxed);
-             auto cur_wt = wt_.load(std::memory_order_acquire);
-             auto id_rd = circ::index_of(cur_rd);
-             auto id_wt = circ::index_of(cur_wt);
-             if (id_rd == id_wt) {
-                 auto* el = elems + id_wt;
-                 auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
-                 if ((~cac_ct) != cur_wt) {
-                     return false; // empty
-                 }
-                 if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
-                     wt_.store(cur_wt + 1, std::memory_order_release);
-                 }
-                 k = 0;
-             }
-             else {
-                 std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
-                 if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
-                     std::forward<F>(f)(buff);
-                     std::forward<R>(out)(true);
-                     return true;
-                 }
-                 ipc::yield(k);
-             }
-         }
-     }
- };
-
- template <>
- struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {
-
-     using rc_t = std::uint64_t;
-
-     enum : rc_t {
-         ep_mask = 0x00000000ffffffffull,
-         ep_incr = 0x0000000100000000ull
-     };
-
-     template <std::size_t DataSize, std::size_t AlignSize>
-     struct elem_t {
-         std::aligned_storage_t<DataSize, AlignSize> data_ {};
-         std::atomic<rc_t> rc_ { 0 }; // read-counter
-     };
-
-     alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
-     alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer
-
-     circ::u2_t cursor() const noexcept {
-         return wt_.load(std::memory_order_acquire);
-     }
-
-     template <typename W, typename F, typename E>
-     bool push(W* wrapper, F&& f, E* elems) {
-         E* el;
-         for (unsigned k = 0;;) {
-             circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
-             if (cc == 0) return false; // no reader
-             el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
-             // check all consumers have finished reading this element
-             auto cur_rc = el->rc_.load(std::memory_order_acquire);
-             circ::cc_t rem_cc = cur_rc & ep_mask;
-             if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
-                 return false; // has not finished yet
-             }
-             // consider rem_cc to be 0 here
-             if (el->rc_.compare_exchange_weak(
-                         cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
-                 break;
-             }
-             ipc::yield(k);
-         }
-         std::forward<F>(f)(&(el->data_));
-         wt_.fetch_add(1, std::memory_order_release);
-         return true;
-     }
-
-     template <typename W, typename F, typename E>
-     bool force_push(W* wrapper, F&& f, E* elems) {
-         E* el;
-         epoch_ += ep_incr;
-         for (unsigned k = 0;;) {
-             circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
-             if (cc == 0) return false; // no reader
-             el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
-             // check all consumers have finished reading this element
-             auto cur_rc = el->rc_.load(std::memory_order_acquire);
-             circ::cc_t rem_cc = cur_rc & ep_mask;
-             if (cc & rem_cc) {
-                 ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
-                 cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
-                 if (cc == 0) return false; // no reader
-             }
-             // just compare & exchange
-             if (el->rc_.compare_exchange_weak(
-                         cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
-                 break;
-             }
-             ipc::yield(k);
-         }
-         std::forward<F>(f)(&(el->data_));
-         wt_.fetch_add(1, std::memory_order_release);
-         return true;
-     }
-
-     template <typename W, typename F, typename R, typename E>
-     bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
-         if (cur == cursor()) return false; // acquire
-         auto* el = elems + circ::index_of(cur++);
-         std::forward<F>(f)(&(el->data_));
-         for (unsigned k = 0;;) {
-             auto cur_rc = el->rc_.load(std::memory_order_acquire);
-             if ((cur_rc & ep_mask) == 0) {
-                 std::forward<R>(out)(true);
-                 return true;
-             }
-             auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
-             if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
-                 std::forward<R>(out)((nxt_rc & ep_mask) == 0);
-                 return true;
-             }
-             ipc::yield(k);
-         }
-     }
- };
-
- template <>
- struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {
-
-     using rc_t   = std::uint64_t;
-     using flag_t = std::uint64_t;
-
-     enum : rc_t {
-         rc_mask = 0x00000000ffffffffull,
-         ep_mask = 0x00ffffffffffffffull,
-         ep_incr = 0x0100000000000000ull,
-         ic_mask = 0xff000000ffffffffull,
-         ic_incr = 0x0000000100000000ull
-     };
-
-     template <std::size_t DataSize, std::size_t AlignSize>
-     struct elem_t {
-         std::aligned_storage_t<DataSize, AlignSize> data_ {};
-         std::atomic<rc_t  > rc_   { 0 }; // read-counter
-         std::atomic<flag_t> f_ct_ { 0 }; // commit flag
-     };
-
-     alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
-     alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };
-
-     circ::u2_t cursor() const noexcept {
-         return ct_.load(std::memory_order_acquire);
-     }
-
-     constexpr static rc_t inc_rc(rc_t rc) noexcept {
-         return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
-     }
-
-     constexpr static rc_t inc_mask(rc_t rc) noexcept {
-         return inc_rc(rc) & ~rc_mask;
-     }
-
-     template <typename W, typename F, typename E>
-     bool push(W* wrapper, F&& f, E* elems) {
-         E* el;
-         circ::u2_t cur_ct;
-         rc_t epoch = epoch_.load(std::memory_order_acquire);
-         for (unsigned k = 0;;) {
-             circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
-             if (cc == 0) return false; // no reader
-             el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
-             // check all consumers have finished reading this element
-             auto cur_rc = el->rc_.load(std::memory_order_relaxed);
-             circ::cc_t rem_cc = cur_rc & rc_mask;
-             if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
-                 return false; // has not finished yet
-             }
-             else if (!rem_cc) {
-                 auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
-                 if ((cur_fl != cur_ct) && cur_fl) {
-                     return false; // full
-                 }
-             }
-             // consider rem_cc to be 0 here
-             if (el->rc_.compare_exchange_weak(
-                         cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
-                 epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
-                 break;
-             }
-             ipc::yield(k);
-         }
-         // only one thread/process would touch here at one time
-         ct_.store(cur_ct + 1, std::memory_order_release);
-         std::forward<F>(f)(&(el->data_));
-         // set flag & try update wt
-         el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
-         return true;
-     }
-
-     template <typename W, typename F, typename E>
-     bool force_push(W* wrapper, F&& f, E* elems) {
-         E* el;
-         circ::u2_t cur_ct;
-         rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
-         for (unsigned k = 0;;) {
-             circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
-             if (cc == 0) return false; // no reader
-             el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
-             // check all consumers have finished reading this element
-             auto cur_rc = el->rc_.load(std::memory_order_acquire);
-             circ::cc_t rem_cc = cur_rc & rc_mask;
-             if (cc & rem_cc) {
-                 ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
-                 cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
-                 if (cc == 0) return false; // no reader
-             }
-             // just compare & exchange
-             if (el->rc_.compare_exchange_weak(
-                         cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
-                 if (epoch == epoch_.load(std::memory_order_acquire)) {
-                     break;
-                 }
-                 else if (push(wrapper, std::forward<F>(f), elems)) {
-                     return true;
-                 }
-                 epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
-             }
-             ipc::yield(k);
-         }
-         // only one thread/process would touch here at one time
-         ct_.store(cur_ct + 1, std::memory_order_release);
-         std::forward<F>(f)(&(el->data_));
-         // set flag & try update wt
-         el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
-         return true;
-     }
-
-     template <typename W, typename F, typename R, typename E, std::size_t N>
-     bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
-         auto* el = elems + circ::index_of(cur);
-         auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
-         if (cur_fl != ~static_cast<flag_t>(cur)) {
-             return false; // empty
-         }
-         ++cur;
-         std::forward<F>(f)(&(el->data_));
-         for (unsigned k = 0;;) {
-             auto cur_rc = el->rc_.load(std::memory_order_acquire);
-             if ((cur_rc & rc_mask) == 0) {
-                 std::forward<R>(out)(true);
-                 el->f_ct_.store(cur + N - 1, std::memory_order_release);
-                 return true;
-             }
-             auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
-             bool last_one = false;
-             if ((last_one = (nxt_rc & rc_mask) == 0)) {
-                 el->f_ct_.store(cur + N - 1, std::memory_order_release);
-             }
-             if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
-                 std::forward<R>(out)(last_one);
-                 return true;
-             }
-             ipc::yield(k);
429
- }
430
- }
431
- };
432
-
433
- } // namespace ipc
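The deleted multi/multi broadcast specialisation above packs three fields into a single atomic 64-bit `rc_t` word. The sketch below is my own illustration (not code from this commit) of that layout and of the `inc_rc` update, under the assumption suggested by the masks: the low 32 bits hold one bit per connected reader, bits 32..55 hold a 24-bit recycle counter used as an ABA guard, and the top 8 bits hold the epoch.

```cpp
// Minimal, self-contained sketch of the rc_t bit layout; field names are
// my own labels, inferred from rc_mask / ic_mask / ep_mask in the diff:
//   bits 63..56 : epoch   -- bumped by ep_incr on each force_push
//   bits 55..32 : counter -- bumped by ic_incr in inc_rc(), an ABA guard
//   bits 31..0  : rc      -- one bit per connected reader, cleared on pop
#include <cstdint>
#include <cstdio>

int main() {
    using rc_t = std::uint64_t;
    constexpr rc_t ic_mask = 0xff000000ffffffffull;
    constexpr rc_t ic_incr = 0x0000000100000000ull;

    // Same formula as prod_cons_impl::inc_rc: advance the middle 24-bit
    // counter modulo 2^24 without disturbing the epoch or reader bits.
    auto inc_rc = [](rc_t rc) {
        return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
    };

    rc_t rc  = 0x01ffffff00000003ull; // epoch = 1, counter at max, readers 0 and 1
    rc_t nxt = inc_rc(rc);            // counter wraps to 0; epoch and rc intact
    std::printf("%016llx -> %016llx\n",
                (unsigned long long)rc, (unsigned long long)nxt);
    // prints: 01ffffff00000003 -> 0100000000000003
}
```

The point of the middle counter is that a `compare_exchange_weak` on `rc_` will fail if another producer recycled the slot between a consumer's load and its CAS, even when the reader bits happen to carry the same value again.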