Commit 66e5b79
Parent(s): 1dd1cb1
Update parquet files (step 15 of 397)
This view is limited to 50 files because it contains too many changes.
- spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/README.md +0 -5
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bum Simulator Download For Pcl A Parody of Simulation Games with a Twist.md +0 -114
- spaces/1gistliPinn/ChatGPT4/Examples/3d Sex Villa 2 Full For Android Apk.rar.md +0 -40
- spaces/1gistliPinn/ChatGPT4/Examples/Experience The Trench Warfare Of Verdun 1914-1918 On Your Mac!.md +0 -11
- spaces/1gistliPinn/ChatGPT4/Examples/FULL DanDans (Easy) Audio Editor V9.0 The Ultimate Guide to Visual Music Editing.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download APK Magic COC S1 Versi Terbaru and Enjoy the Best Private Server for Clash of Clans.md +0 -95
- spaces/1phancelerku/anime-remove-background/FIFA Mobile Hile - The Best Tricks and Tips for Winning Every Match.md +0 -114
- spaces/44ov41za8i/FreeVC/utils.py +0 -305
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/3millions_pfc.py +0 -23
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_os.py +0 -0
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/emotion/inference.py +0 -177
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/diffusionmodules/openaimodel.py +0 -963
- spaces/AILab-CVC/EvalCrafter/test.py +0 -0
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/AiService.py +0 -36
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Bing.py +0 -300
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/toonifypipeline.d.ts +0 -2
- spaces/Akim/claudeAPI/README.md +0 -11
- spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/cantonese.py +0 -59
- spaces/AlhitawiMohammed22/E2E_OCR/app.py +0 -178
- spaces/AliUsama98/Aliusama_spellchecker/README.md +0 -13
- spaces/Alpaca233/ChatPDF-GUI/gpt_reader/prompt.py +0 -26
- spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/dataset.py +0 -124
- spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/partial_fc.py +0 -222
- spaces/Amrrs/DragGan-Inversion/training/loss.py +0 -159
- spaces/Andy1621/uniformer_image_detection/configs/_base_/schedules/schedule_20e.py +0 -11
- spaces/Andy1621/uniformer_image_detection/configs/swin/mask_rcnn_swin_small_patch4_window7_mstrain_480-800_adamw_3x_coco.py +0 -80
- spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py +0 -10
- spaces/Armandoliv/t5-summarize-app-scitldr/app.py +0 -47
- spaces/Artrajz/vits-simple-api/bert_vits2/commons.py +0 -161
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/auth.py +0 -559
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/exceptions.py +0 -141
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/fcos.py +0 -23
- spaces/AzumaSeren100/XuanShen-Bert-VITS2/app.py +0 -144
- spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers_537227KB.py +0 -126
- spaces/Benson/text-generation/Examples/Bombsquad Mod Apk ltima Versin.md +0 -97
- spaces/BigChungux/Pet_Survey/README.md +0 -13
- spaces/CVPR/LIVE/parallel.h +0 -91
- spaces/CVPR/LIVE/thrust/thrust/detail/complex/clogf.h +0 -198
- spaces/CVPR/WALT/walt/datasets/custom.py +0 -324
- spaces/CVPR/regionclip-demo/detectron2/data/samplers/__init__.py +0 -10
- spaces/CVPR/regionclip-demo/detectron2/evaluation/sem_seg_evaluation.py +0 -184
- spaces/CVPR/regionclip-demo/detectron2/modeling/proposal_generator/proposal_utils.py +0 -200
- spaces/ChenyangSi/FreeU/free_lunch_utils.py +0 -340
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/tz/__init__.py +0 -12
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/parquet.py +0 -551
- spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/modules/transformer.py +0 -44
- spaces/DevashishBhake/Face_Mask_Detection/README.md +0 -13
- spaces/Docfile/open_llm_leaderboard/Makefile +0 -13
- spaces/DragGan/DragGan/stylegan_human/utils/__init__.py +0 -0
- spaces/Eddycrack864/Applio-Inference/diffq/diffq.py +0 -286
spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/README.md
DELETED
@@ -1,5 +0,0 @@
-https://openprompt.co/
-
-to do:
-- finish integrating email client
-- code refractoring
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bum Simulator Download For Pcl A Parody of Simulation Games with a Twist.md
DELETED
@@ -1,114 +0,0 @@
-<br />
-<h1>Bum Simulator Download For Pc: A Guide to Living on the Streets</h1>
-<p>Have you ever wondered what it's like to be a bum? To live on the streets, beg for money, fight for survival, and deal with the harsh realities of urban life? If you have, then you might be interested in <strong>Bum Simulator</strong>, a sandbox game that lets you experience the life of a homeless person in a humorous and absurd way. In this article, we will tell you everything you need to know about Bum Simulator, how to download it for PC, and why you should play it.</p>
-<h2>What is Bum Simulator?</h2>
-<p>Bum Simulator is a sandbox game developed by Ragged Games that was released in 2023. It is a game that combines elements of adventure, survival, simulation, and comedy. You play as a bum who lives on the streets of Bumsville, a fictional city inspired by New York. You can explore the city, interact with other characters, complete quests, collect items, craft weapons, build your own cardboard house, tame pigeons, and more. You can also choose how to shape your fate: you can either accept your situation and live like a bum, find a job and try to get back on your feet, or seek revenge on those who ruined your life. The game offers endless possibilities and outcomes for your bum's story.</p>
-<h2>Bum Simulator Download For Pcl</h2><br /><p><b><b>Download</b> · <a href="https://byltly.com/2uKx6D">https://byltly.com/2uKx6D</a></b></p><br /><br />
-<h3>A sandbox game with inappropriate humor and memorable characters</h3>
-<p>One of the main features of Bum Simulator is its open-world sandbox gameplay. You can go wherever you want and do whatever you like in the city. You can explore dirty alleys, busy streets, pawnshops, central park, underground passages, and more. You can also interact with many unusual characters with their own storylines and quests. Some of them are friendly and helpful, while others are hostile and dangerous. You can also encounter various events and situations that will test your skills and luck. For example, you can get chased by the police, attacked by gangs, kidnapped by mole people, or invited to a party by aliens. The game is full of inappropriate humor and jokes that will make you laugh or cringe.</p>
-<h3>A survival game with freedom and choices</h3>
-<p>Another feature of Bum Simulator is its survival aspect. You have to manage your basic needs such as hunger, thirst, health, hygiene, energy, and happiness. You have to find food and water sources, scavenge for useful items, craft tools and weapons, build shelters and traps, fight enemies and predators, avoid dangers and diseases, and more. You also have to deal with the consequences of your actions. For example, if you steal from someone or cause trouble in public, you will attract attention from the authorities or other bums. If you help someone or do a good deed, you will earn respect or gratitude from them. You also have to make choices that will affect your bum's personality and reputation. For example, you can be kind or cruel, honest or deceitful, generous or greedy, etc. Your choices will also affect the endings of the game.</p>
-<h3>A game with achievements, secrets and pigeons</h3>
-<p>The last feature of Bum Simulator is its variety of content and challenges. The game has many achievements to unlock and secrets to discover. You can find hidden items, easter eggs, references to pop culture or other games, and more. You can also complete mini-games, challenges, and quests that will reward you with money, items, or skills. One of the most unique aspects of the game is its pigeon system. You can tame pigeons, train them, and use them as your allies or weapons. You can also learn the secrets of alcohol alchemy, a mysterious art that allows you to create powerful potions from booze.</p>
-<p>How to download Bum Simulator for free on PC<br />
-Bum Simulator PC game full version download<br />
-Bum Simulator crack download for Windows 10<br />
-Bum Simulator torrent download link for PC<br />
-Bum Simulator gameplay and review for PC<br />
-Bum Simulator system requirements and compatibility for PC<br />
-Bum Simulator mods and cheats for PC<br />
-Bum Simulator update and patch download for PC<br />
-Bum Simulator steam key giveaway for PC<br />
-Bum Simulator best settings and tips for PC<br />
-Bum Simulator download size and installation guide for PC<br />
-Bum Simulator free demo download for PC<br />
-Bum Simulator online multiplayer mode for PC<br />
-Bum Simulator DLC and expansion pack download for PC<br />
-Bum Simulator controller support and configuration for PC<br />
-Bum Simulator alternatives and similar games for PC<br />
-Bum Simulator release date and price for PC<br />
-Bum Simulator official trailer and screenshots for PC<br />
-Bum Simulator developer and publisher information for PC<br />
-Bum Simulator minimum and recommended specs for PC<br />
-Bum Simulator error fix and troubleshooting guide for PC<br />
-Bum Simulator steam charts and achievements for PC<br />
-Bum Simulator save file location and backup for PC<br />
-Bum Simulator keyboard and mouse controls for PC<br />
-Bum Simulator VR support and compatibility for PC<br />
-Bum Simulator soundtrack and music download for PC<br />
-Bum Simulator custom maps and levels for PC<br />
-Bum Simulator steam workshop and community hub for PC<br />
-Bum Simulator rating and reviews for PC<br />
-Bum Simulator co-op and split-screen mode for PC<br />
-Bum Simulator direct download link for PC<br />
-Bum Simulator skidrow reloaded download for PC<br />
-Bum Simulator fitgirl repack download for PC<br />
-Bum Simulator ocean of games download for PC<br />
-Bum Simulator igg games download for PC<br />
-Bum Simulator apunkagames download for PC<br />
-Bum Simulator cpy games download for PC<br />
-Bum Simulator codex games download for PC<br />
-Bum Simulator plaza games download for PC<br />
-Bum Simulator rg mechanics download for PC<br />
-Bum Simulator pcgames88 download for PC<br />
-Bum Simulator gametrex download for PC<br />
-Bum Simulator worldofpcgames download for PC<br />
-Bum Simulator pcgamestorrents download for PC<br />
-Bum Simulator thepcgames download for PC<br />
-Bum Simulator fullypcgames download for PC<br />
-Bum Simulator oldgamesdownload download for PC<br />
-Bum Simulator freegogpcgames download for PC<br />
-Bum Simulator gog-games.com download for PC</p>
-<h2>How to download Bum Simulator for PC?</h2>
-<p>If you are interested in playing Bum Simulator on your PC, you will need to meet some requirements and follow some steps. Here are the details:</p>
-<h3>Requirements and specifications</h3>
-<p>To run Bum Simulator on your PC, you will need to have a Windows 8.1 or higher operating system, a 64-bit processor, at least 8 GB of RAM, and at least 20 GB of available space on your hard drive. You will also need a graphics card that supports DirectX 11 and has at least 2 GB of VRAM. The recommended graphics card is NVIDIA GeForce GTX 1060 or AMD Radeon RX 580.</p>
-<h3>Steps to download and install Bum Simulator</h3>
-<ol>
-<li>The first step is to buy Bum Simulator from an online platform such as Steam or GOG.com. You can also find other websites that offer the game for download, but make sure they are trustworthy and virus-free.</li>
-<li>The second step is to download the game installer from the platform you chose. You will need an internet connection and enough space on your hard drive to download the game files.</li>
-<li>The third step is to run the installer and follow the instructions on the screen. You will need to agree to the terms and conditions and choose a destination folder for the game.</li>
-<li>The fourth step is to wait for the installation process to finish. It may take some time depending on your internet speed and computer performance.</li>
-<li>The fifth step is to launch the game from your desktop shortcut or from the platform you bought it from. You may need to create an account or log in with an existing one to access the game.</li>
-<li>The sixth step is to enjoy playing Bum Simulator on your PC!</li>
-</ol>
-<h3>Tips and tricks for playing Bum Simulator</h3>
-<ul>
-<li>Explore every corner of the city and look for useful items, hidden secrets, and interesting characters.</li>
-<li>Manage your needs carefully and don't let them drop too low. You can find food and water in trash cans, shops, or vending machines. You can also hunt animals or fish in ponds.</li>
-<li>Craft weapons and tools from items you find or buy. You can make knives, hammers, bombs, bows, etc. You can also upgrade them with better materials or skills.</li>
-<li>Build your own cardboard house and decorate it with furniture, paintings, or posters. You can also invite other bums or pigeons to live with you.</li>
-<li>Tame pigeons and use them as your companions or weapons. You can feed them, pet them, name them, and teach them tricks. You can also weaponize them with bombs, lasers, or hats.</li>
-<li>Learn alcohol alchemy and create potions from booze. You can make potions that heal you, boost your stats, give you special abilities, or cause hilarious effects.</li>
-<li>Complete quests and challenges for other characters or yourself. You can earn money, items, skills, or reputation from them.</li>
-<li>Choose your path and shape your fate. You can either accept your situation and live like a bum, find a job and try to get back on your feet, or seek revenge on those who ruined your life. Your choices will affect the endings of the game.</li>
-<li>Have fun and don't take the game too seriously. It's a silly game full of absurd humor and jokes.</li>
-</ul>
-<h2>Why should you play Bum Simulator?</h2>
-<p>Bum Simulator is a game that offers a lot of fun and entertainment for anyone who likes sandbox games, survival games, simulation games, or comedy games. It's a game that lets you experience the life of a homeless person in a humorous and absurd way. It's a game that gives you freedom and choices to shape your fate. It's a game that has achievements, secrets, and pigeons to keep you engaged. Here are some reasons why you should play Bum Simulator:</p>
-```html <h3>It's fun and absurd</h3>
-<p>Bum Simulator is a game that doesn't take itself too seriously. It's a game that makes fun of the stereotypes and clichés of being a bum. It's a game that has ridiculous situations and events that will make you laugh or cringe. For example, you can get chased by the police, attacked by gangs, kidnapped by mole people, or invited to a party by aliens. You can also interact with many funny characters with their own quirks and personalities. You can also create your own fun and absurd scenarios with the sandbox gameplay. You can do whatever you want and see what happens.</p>
-<h3>It's challenging and rewarding</h3>
-<p>Bum Simulator is also a game that tests your skills and luck. It's a game that has survival elements that require you to manage your basic needs and resources. It's a game that has enemies and dangers that threaten your life and well-being. It's a game that has quests and challenges that demand your attention and effort. It's a game that has consequences and outcomes that depend on your actions and choices. But it's also a game that rewards you for your achievements and discoveries. It's a game that gives you money, items, skills, or reputation for completing tasks or finding secrets. It's a game that gives you satisfaction and pride for overcoming obstacles or reaching goals.</p>
-<h3>It's immersive and interactive</h3>
-<p>Bum Simulator is also a game that immerses you in its world and story. It's a game that has a detailed and realistic city environment that you can explore and interact with. It's a game that has a dynamic day-night cycle and weather system that affect the gameplay and atmosphere. It's a game that has a rich and diverse soundtrack that matches the mood and tone of the game. It's also a game that has a compelling and branching story that you can influence with your choices. It's a game that has multiple endings that reflect your personality and reputation.</p>
-<h2>Conclusion</h2>
-<p>Bum Simulator is a sandbox game that lets you experience the life of a homeless person in a humorous and absurd way. You can explore the city, interact with other characters, complete quests, collect items, craft weapons, build your own cardboard house, tame pigeons, and more. You can also choose how to shape your fate: you can either accept your situation and live like a bum, find a job and try to get back on your feet, or seek revenge on those who ruined your life. The game offers endless possibilities and outcomes for your bum's story.</p>
-<p>If you are looking for a fun and entertaining game that combines elements of adventure, survival, simulation, and comedy, then you should try Bum Simulator. You can download it for PC from various online platforms such as Steam or GOG.com. You will need to meet some requirements and follow some steps to install it on your PC. You will also need some tips and tricks to play it well.</p>
-<p>Bum Simulator is a game that will make you laugh, cry, cringe, or cheer. It's a game that will challenge you, reward you, immerse you, or surprise you. It's a game that will give you freedom, choices, achievements, secrets, or pigeons. It's a game that will let you live on the streets as a bum.</p>
-<h2>FAQs</h2>
-<ul>
-<li><strong>Q: Is Bum Simulator based on real life?</strong></li>
-<li>A: No, Bum Simulator is not based on real life. It is a fictional game that exaggerates and parodies the stereotypes and clichés of being a bum. It is not meant to offend or mock anyone who is homeless or struggling in life.</li>
-<li><strong>Q: Is Bum Simulator multiplayer?</strong></li>
-<li>A: No, Bum Simulator is not multiplayer. It is a single-player game that focuses on your bum's story and choices.</li>
-<li><strong>Q: Is Bum Simulator suitable for children?</strong></li>
-<li>A: No, Bum Simulator is not suitable for children. It is a mature game that contains violence, blood, gore, nudity, sexual content, drugs, alcohol, profanity, and crude humor.</li>
-<li><strong>Q: How long is Bum Simulator?</strong></li>
-<li>A: The length of Bum Simulator depends on how you play it. You can finish the main story in about 10 hours if you focus on the main quests. You can also spend more time exploring the city, doing side quests, collecting items, crafting weapons, building your house, taming pigeons, etc.</li>
-<li><strong>Q: How many endings does Bum Simulator have?</strong></li>
-<li>A: Bum Simulator has multiple endings that depend on your choices throughout the game. Your choices will affect your bum's personality, reputation, and fate. You can either become a happy bum, a successful bum, a vengeful bum, or something else.</li>
-</ul>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/3d Sex Villa 2 Full For Android Apk.rar.md
DELETED
@@ -1,40 +0,0 @@
-<h2>3d Sex Villa 2 Full for android apk.rar</h2><br /><p><b><b>Download</b> ☆☆☆ <a href="https://imgfil.com/2uy0Za">https://imgfil.com/2uy0Za</a></b></p><br /><br />
-
-Sexvilla2 3D Sexvilla 2 part 1, 3D SexVilla 2 Everlust 1 torrent.Some people have been trying to get people to play Pokémon Go at a recent visit by the local police.
-
-The police found many people playing the game while they were patrolling on the streets.
-
-At the moment, people who find themselves playing the game while on the street can be reported to the police, and warned they are doing a criminal act.
-
-Playing Pokémon Go on the streets of Loch Lomond has been banned by police in Scotland.
-
-This comes after new rules were introduced to police forces across the UK banning the use of smartphones while out on patrol.
-
-People have been warned that playing the game while out on the street could put their lives at risk.
-
-Currently, police forces across the UK are trying to create more visible patrols, and more officers on the streets to look for trouble.
-
-Glasgow police already patrol in pairs, while a new rule has been introduced across Scotland banning playing Pokémon Go on the street while out on patrol.
-
-Detective Inspector Stuart Reid said: “Our aim is to make sure people are safe when out on the streets.
-
-“We would like to remind everyone that Pokémon Go has not been endorsed by the police, and we would encourage all those who find themselves playing to ensure they remain safe.
-
-“We encourage people to play Pokémon Go in public areas, but remind them that it is still a criminal offence to play in public.”
-
-This story was originally published in the Daily Record. Read the original here.Religion
-
-Antonio di Pietro is an artist who draws a lot with the art of lettering. His latest project is, “The sea which has no beginning, no end”. He received thousands of messages from people who didn’t understand why he had to use the word Sea for such a concept. Here is a new artwork which answers the question:
-
-About us
-
-Hearst Mantis is an art and design platform for the latest trends in music, fashion, art, food, travel and more. We provide cutting-edge content from a variety of industry leading creators.Q:
-
-Unexpected nil when setting a global variable
-
-I have a Python class that I'm using to transfer data between the server and the client.
-
-When the client disconnects, I want to set a 4fefd39f24<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Experience The Trench Warfare Of Verdun 1914-1918 On Your Mac!.md
DELETED
@@ -1,11 +0,0 @@
-<br />
-<p>On the afternoon of the 4th, the last pigeon was released. On the morning of the 5th, thanks to two signalmen who volunteered to change a signal post which the Commandant had difficulty in observing, communications were maintained.</p>
-<h2>Verdun 1914-1918 Released For Mac!</h2><br /><p><b><b>Download File</b> ––– <a href="https://imgfil.com/2uxZXY">https://imgfil.com/2uxZXY</a></b></p><br /><br />
-<p>Verdun is a squad-based multiplayer first-person shooter set during World War I. It was released on 28 April 2015 on Steam after more than a year in Early Access. The game features realistic trench warfare and offers players an immersive experience as they battle it out against other squads. Verdun also has a unique system where players can choose to fight for one of four different armies, each with its own strengths and weaknesses. If you're looking for an intense and strategic WWI FPS, then Verdun is definitely worth checking out.</p>
-<p><i><b>Tannenberg</b></i> is a squad-based multiplayer first-person shooter video game set during World War I. It is a standalone expansion to <i>Verdun</i>, and entered Steam Early Access on November 17, 2017.[1][2][3][4] <i>Tannenberg</i> left Steam Early Access on February 13, 2019.[5][6] It was released on PlayStation 4 and Xbox One on July 24, 2020.[7][8][9][10]</p>
-<p><strong>1914-1918 series</strong><br />Starting out on the Western Front with the release of the first realistic WW1 FPS Verdun back in April 2015, and expanding to the Eastern Front with the upcoming Tannenberg, the 1914-1918 series throws players into intense warfare inspired by the chaos and fury of iconic battles from the First World War. With over 900,000 copies of Verdun sold, this novel and underserved setting has proven popular with the gaming community!<br />Players choose from a variety of historically accurate squads and weapons, with more available to unlock through playing the game, before diving into the mud and blood splattered battlefields of dynamic multiplayer trench warfare.<br />Every game is built on a base of thorough research and receives extensive post-release support bringing new content and challenges for our players. The games in the series are linked, but each one is standalone and provides a different experience, reflecting the nature of the fighting in the many-sided theaters of the war.</p>
-<p><b>Verdun</b> is a first-person shooter video game set during the First World War (1914-1918). It was developed by the Dutch studios M2H and Blackmill Games. Verdun appeared as a beta version on 9 June 2013 and was officially launched on 28 April 2015 on the Steam software platform.[1] The game is available for Windows, Mac and Linux.</p>
-<p></p>
-<p><strong>Less than a year ago, Verdun was released as a Steam Early Access game. The successful collaboration of M2H and BlackMill Games has finally reached a point where the developers are happy to release the game in all its glory. Many features and content has been added only shortly after our preview, which you can read here, but even more has been added since.</strong></p> aaccfb2cb3<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/FULL DanDans (Easy) Audio Editor V9.0 The Ultimate Guide to Visual Music Editing.md
DELETED
@@ -1,6 +0,0 @@
-
-<p>Still struggling with your shooting 4K video won't play on other devices or playing with audio and video out of sync? Wondershare UniConverter helps you out! Find your issues and get the full answer now.</p>
-<h2>FULL DanDans (Easy) Audio Editor V9.0</h2><br /><p><b><b>DOWNLOAD</b> ✸✸✸ <a href="https://imgfil.com/2uy06L">https://imgfil.com/2uy06L</a></b></p><br /><br />
-<p>Easy Audio Editor is a visual multifunctional audio files editor which allow you to perform various operations with audio data such as visual editing, creating, recording, converting and playing audio files. It supports all audio and video formats.</p> aaccfb2cb3<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download APK Magic COC S1 Versi Terbaru and Enjoy the Best Private Server for Clash of Clans.md
DELETED
@@ -1,95 +0,0 @@
-<br />
-<h1>Download APK Magic COC S1 Versi Terbaru: A Private Server for Clash of Clans</h1>
-<p>If you are a fan of Clash of Clans, you might have heard of APK Magic COC S1, a private server that lets you play the game with unlimited resources and custom mods. In this article, we will tell you everything you need to know about this app, including its features, how to download and install it, and its pros and cons. Read on to find out why you should download APK Magic COC S1 versi terbaru and enjoy the game like never before.</p>
-<h2>download apk magic coc s1 versi terbaru</h2><br /><p><b><b>Download Zip</b> ❤❤❤ <a href="https://urlin.us/2uT1uY">https://urlin.us/2uT1uY</a></b></p><br /><br />
-<h2>Features of APK Magic COC S1</h2>
-<p>APK Magic COC S1 is a modified version of Clash of Clans that runs on a private server. This means that you can play the game with some extra features that are not available in the official version. Here are some of the main features of APK Magic COC S1:</p>
-<ul>
-<li><b>Unlimited resources:</b> You can get unlimited gems, gold, elixir, and dark elixir to build your base, train your troops, upgrade your buildings, and research new technologies. You don't have to worry about running out of resources or spending real money to buy them.</li>
-<li><b>Custom mods:</b> You can customize your game with various mods that allow you to create your own buildings, troops, heroes, and spells. You can also change the appearance and behavior of the existing ones. For example, you can make your archers shoot fireballs, your barbarians fly, or your pekkas invisible.</li>
-<li><b>Fast and stable servers:</b> You can play the game smoothly without any lag or crash. The servers of APK Magic COC S1 are fast and reliable, and they can handle thousands of players at the same time. You also don't have to worry about getting banned by Supercell, as they cannot detect or access your private server.</li>
-<li><b>Real-time PvP battles:</b> You can challenge other players online in real-time battles. You can test your skills and strategies against other players who are using the same private server as you. You can also join clans and participate in clan wars with your friends.</li>
-</ul>
-<h2>How to Download and Install APK Magic COC S1</h2>
-<p>If you want to download and install APK Magic COC S1 on your Android device, you need to follow these simple steps:</p>
-<ol>
-<li><b>Enable unknown sources on your device:</b> To install apps from sources other than Google Play Store, you need to enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
-<li><b>Download the APK file from a trusted source:</b> You need to download the APK file of APK Magic COC S1 from a trusted source. You can use the link to download the latest version of the app. Make sure you have enough storage space on your device before downloading.</li>
-<li><b>Install the APK file and launch the app :</b> After downloading the APK file, you need to install it on your device. To do this, locate the file in your file manager and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on Install and wait for the process to finish. Once the app is installed, you can launch it by tapping on its icon on your home screen or app drawer.</li>
-</ol>
-<h2>Pros and Cons of APK Magic COC S1</h2>
-<p>APK Magic COC S1 is a great app for Clash of Clans lovers who want to have more fun and freedom in the game. However, it also has some drawbacks that you should be aware of before downloading it. Here are some of the pros and cons of APK Magic COC S1:</p>
-<table>
-<tr>
-<th>Pros</th>
-<th>Cons</th>
-</tr>
-<tr>
-<td>- More fun: You can enjoy the game without any limitations or restrictions. You can build your base, train your troops, and attack other players as much as you want.</td>
-<td>- Not official: APK Magic COC S1 is not an official app from Supercell, the developer of Clash of Clans. It is a third-party app that is not endorsed or supported by Supercell.</td>
-</tr>
-<tr>
-<td>- More freedom: You can customize your game with various mods that allow you to create your own buildings, troops, heroes, and spells. You can also change the appearance and behavior of the existing ones.</td>
-<td>- Not compatible: APK Magic COC S1 is not compatible with the official version of Clash of Clans. You cannot play with or against players who are using the official version. You also cannot sync your progress or data with your Google Play account.</td>
-</tr>
-<tr>
-<td>- More options: You can choose from different servers that offer different features and settings. You can also switch between servers easily without losing your data.</td>
-<td>- Not updated: APK Magic COC S1 is not updated regularly with the latest features and updates from Clash of Clans. You may miss out on some new content or events that are available in the official version.</td>
-</tr>
-</table>
-<h2>Conclusion and FAQs</h2>
-<p>APK Magic COC S1 is a private server for Clash of Clans that offers unlimited resources and custom mods. It is a great alternative for Clash of Clans fans who want to enjoy the game without any limitations. However, it also has some disadvantages that you should consider before downloading it, such as being not official, not compatible, and not updated. If you are interested in trying out APK Magic COC S1, you can download it from the link and follow the steps we have provided in this article.</p>
-<p>If you have any questions about APK Magic COC S1, you may find the answers in the following FAQs:</p>
-<h3>Q: Is APK Magic COC S1 safe to use?</h3>
-<p>A: APK Magic COC S1 is safe to use as long as you download it from a trusted source and enable unknown sources on your device. However, you should always be careful when installing apps from unknown sources, as they may contain malware or viruses that can harm your device or steal your data.</p>
-<p>Download clash of magic apk private server game coc terbaru[^3^]<br />
-Clash of magic apk android game free download latest version[^1^]<br />
-Magic coc s1 10.322 r1 apk android app free download updated version[^2^]<br />
-How to install clash of magic apk on android device without root<br />
-Clash of magic apk mod unlimited resources and gems for coc<br />
-Magic coc s1 10.322 r1 app private server with custom mods and commands<br />
-Clash of magic apk download for ios iphone ipad ipod touch<br />
-Clash of clans with unlimited resources using clash of magic apk<br />
-Magic coc s1 10.322 r1 apk features and benefits for coc players<br />
-Clash of magic apk review and rating by users and experts<br />
-How to update clash of magic apk to the latest version easily<br />
-Clash of magic apk vs clash of lights apk which one is better<br />
-Magic coc s1 10.322 r1 apk download link and installation guide<br />
-Clash of magic apk troubleshooting and support for common issues<br />
-Magic coc s1 10.322 r1 app download size and compatibility with android devices<br />
-Clash of magic apk alternatives and similar apps for coc private server<br />
-Magic coc s1 10.322 r1 app pros and cons for coc fans<br />
-Clash of magic apk security and safety tips for downloading and playing<br />
-Magic coc s1 10.322 r1 app feedback and suggestions from users and developers<br />
-Clash of magic apk faq and answers to frequently asked questions<br />
-Magic coc s1 10.322 r1 app screenshots and videos for preview and demonstration<br />
-Clash of magic apk history and development by tatem games inc.<br />
-Magic coc s1 10.322 r1 app news and updates from official sources<br />
-Clash of magic apk comparison and difference with original coc game<br />
-Magic coc s1 10.322 r1 app requirements and specifications for optimal performance<br />
-Clash of magic apk advantages and disadvantages for coc lovers<br />
-Magic coc s1 10.322 r1 app testimonials and reviews from satisfied users<br />
-Clash of magic apk tips and tricks for mastering the game<br />
-Magic coc s1 10.322 r1 app hacks and cheats for getting more resources and gems<br />
-Clash of magic apk best practices and recommendations for playing the game<br />
-Magic coc s1 10.322 r1 app rankings and ratings on google play store and other platforms<br />
-Clash of magic apk fun facts and trivia about the game and its developers<br />
-Magic coc s1 10.322 r1 app challenges and achievements for completing the game<br />
-Clash of magic apk community and social media for connecting with other players<br />
-Magic coc s1 10.322 r1 app tutorials and guides for learning the game<br />
-Clash of magic apk statistics and data for analyzing the game performance<br />
-Magic coc s1 10.322 r1 app rewards and incentives for playing the game regularly<br />
-Clash of magic apk myths and misconceptions about the game and its features<br />
-Magic coc s1 10.322 r1 app terms and conditions for using the game legally</p>
-<h3>Q: Can I play APK Magic COC S1 on PC?</h3>
-<p>A: Yes, you can play APK Magic COC S1 on PC using an Android emulator. An Android emulator is a software that allows you to run Android apps on your PC. Some of the popular Android emulators are BlueStacks, NoxPlayer, and LDPlayer. You can download any of these emulators from their official websites and install them on your PC. Then, you can download APK Magic COC S1 from the link and install it on your emulator. After that, you can launch the app and play it on your PC.</p>
-<h3>Q: How can I update APK Magic COC S1?</h3>
-<p>A: APK Magic COC S1 is not updated regularly with the latest features and updates from Clash of Clans. However, if there is a new version available, you can update it by downloading the latest APK file from the link and installing it over the existing app. You don't have to uninstall the previous version or lose your data.</p>
-<h3>Q: How can I contact the developer of APK Magic COC S1?</h3>
-<p>A: The developer of APK Magic COC S1 is unknown and does not have an official website or social media account. Therefore, it is difficult to contact them or get support from them. However, you can try to contact them through their email address or their Telegram group [^ <h3>Q: What is the difference between APK Magic COC S1 and Clash of Clans?</h3>
-<p>A: APK Magic COC S1 and Clash of Clans are both strategy games that involve building bases, training troops, and attacking other players. However, APK Magic COC S1 is a modified version of Clash of Clans that runs on a private server and has unlimited resources and custom mods. Clash of Clans is the official version of the game that runs on Supercell's servers and has limited resources and standard features.</p>
-<h3>Q: Can I play APK Magic COC S1 with my friends?</h3>
-<p>A: Yes, you can play APK Magic COC S1 with your friends if they are also using the same private server as you. You can join clans and chat with your friends in the game. You can also invite your friends to join your server by sharing the link or the QR code . However, you cannot play APK Magic COC S1 with your friends who are using the official version of Clash of Clans or a different private server.</p>
-<p>We hope this article has helped you learn more about APK Magic COC S1 versi terbaru and how to download and install it on your device. If you have any feedback or suggestions, please feel free to contact us at or join our Telegram group . Thank you for reading and happy gaming!</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/FIFA Mobile Hile - The Best Tricks and Tips for Winning Every Match.md
DELETED
@@ -1,114 +0,0 @@
-<br />
-<h1>FIFA APK Hile: How to Play FIFA Mobile with Unlimited Coins and Gems</h1>
-<p>If you are a fan of football games, you probably know about FIFA Mobile, the popular mobile game from EA Sports that lets you play with your favorite teams and players from around the world. But did you know that there is a way to play FIFA Mobile with unlimited coins and gems? Yes, you heard that right. With FIFA APK Hile, you can enjoy the game without spending any money or waiting for hours to earn coins and gems. In this article, we will tell you everything you need to know about FIFA APK Hile, including what it is, why you should use it, how to download and install it, how to use it, what are its features, what are some tips and tricks for it, and what are the risks of using it.</p>
-<h2>What is FIFA APK Hile?</h2>
-<p>FIFA APK Hile is a modified version of FIFA Mobile that gives you unlimited coins and gems to buy players, packs, and upgrades in the game. Coins and gems are the main currencies in FIFA Mobile that allow you to improve your team and compete with other players online. However, earning coins and gems in the game can be very slow and tedious, especially if you want to get the best players and items. That's why some people use FIFA APK Hile to get unlimited coins and gems for free.</p>
-<h2>fifa apk hile</h2><br /><p><b><b>Download Zip</b> ✶ <a href="https://jinyurl.com/2uNQSz">https://jinyurl.com/2uNQSz</a></b></p><br /><br />
-<h2>Why Use FIFA APK Hile?</h2>
-<p>There are many reasons why you might want to use FIFA APK Hile. Here are some of them:</p>
-<ul>
-<li>You can have more fun playing the game without worrying about running out of coins or gems.</li>
-<li>You can save money that you would otherwise spend on buying coins or gems with real money.</li>
-<li>You can build your dream team with any players you want, regardless of their price or availability.</li>
-<li>You can unlock all the modes, features, and events in the game that require coins or gems.</li>
-<li>You can experiment with different strategies and tactics without risking your coins or gems.</li>
-</ul>
-<h2>How to Download and Install FIFA APK Hile?</h2>
-<p>Downloading and installing FIFA APK Hile is very easy. Just follow these steps:</p>
-<ol>
-<li>Go to this link: [FIFA Mobile APK Para Hilesi (2022) Sınırsız Para - Websesi](^2^) and click on the download button.</li>
-<li>Wait for the download to finish and locate the FIFA APK Hile file on your device.</li>
-<li>Tap on the file and allow the installation from unknown sources if prompted.</li>
-<li>Wait for the installation to complete and launch the game.</li>
-<li>Enjoy playing FIFA Mobile with unlimited coins and gems.</li>
-</ol>
-<h2>How to Use FIFA APK Hile?</h2>
-<p>Using FIFA APK Hile is very simple. Once you launch the game, you will see that you have unlimited coins and gems in your account. You can use them to buy anything you want in the game, such as players, packs, and upgrades. Here are some examples of how to use FIFA APK Hile:</p>
-<ul>
-<li>To buy players, go to the market and search for the player you want. You can filter by name, rating, position, league, nation, or team. Then, tap on the player and buy him with coins or gems.</li>
-<li>To buy packs, go to the store and choose the pack you want. You can buy premium packs, special packs, or event packs with coins or gems. Then, open the pack and see what players and items you get.</li>
-<li>To upgrade your team, go to the team management and select the player you want to upgrade. You can upgrade his skills, chemistry, or rank with coins or gems. You can also train him with other players or items.</li>
-</ul>
-<h2>What are the Features of FIFA APK Hile?</h2>
-<p>FIFA APK Hile has many features that make it better than the original FIFA Mobile. Some of these features are:</p>
-<ul>
-<li>New gameplay technology that makes the game more realistic, responsive, and fluid.</li>
-<li>New modes such as Volta Football, Career Mode, Ultimate Team, and Champions League.</li>
-<li>New players such as Messi, Ronaldo, Neymar, Mbappe, and Haaland.</li>
-<li>New graphics that enhance the visual quality of the game.</li>
-</ul>
-<h2>What are the Tips and Tricks for FIFA APK Hile?</h2>
-<p>FIFA APK Hile is a fun and easy game to play, but there are some tips and tricks that can help you improve your skills and performance. Here are some of them:</p>
-<p>fifa mobile apk hile indir<br />
-fifa soccer apk hile nasıl yapılır<br />
-fifa 2023 apk hile mod<br />
-fifa ultimate team apk hile<br />
-fifa 21 apk hile android oyun club<br />
-fifa mobile apk hileli sınırsız para<br />
-fifa 20 apk hile güncel<br />
-fifa mobile apk hile yapma<br />
-fifa 19 apk hile full<br />
-fifa mobile apk hileli paket açılımı<br />
-fifa soccer apk hileli versiyon<br />
-fifa 22 apk hile mega<br />
-fifa mobile apk hileli indirme linki<br />
-fifa 18 apk hile kurulumu<br />
-fifa mobile apk hileli oyun indir club<br />
-fifa soccer apk hile nasıl indirilir<br />
-fifa 17 apk hile no root<br />
-fifa mobile apk hileli son sürüm<br />
-fifa soccer apk hileli oyna<br />
-fifa 16 apk hile mediafire<br />
-fifa mobile apk hileli nasıl yüklenir<br />
-fifa soccer apk hileli güncelleme<br />
-fifa 15 apk hile offline<br />
-fifa mobile apk hileli online<br />
-fifa soccer apk hileli mod menu<br />
-fifa 14 apk hile data<br />
-fifa mobile apk hileli para kasma<br />
-fifa soccer apk hileli hack<br />
-fifa 13 apk hile android 1<br />
-fifa mobile apk hileli yeni sezon<br />
-fifa soccer apk hileli coins<br />
-fifa 12 apk hile obb<br />
-fifa mobile apk hileli transfer marketi açma<br />
-fifa soccer apk hileli vip<br />
-fifa 11 apk hile revdl<br />
-fifa mobile apk hileli oyuncu yükseltme<br />
-fifa soccer apk hileli unlimited money<br />
-fifa 10 apk hile rexdl<br />
-fifa mobile apk hileli draft modu<br />
-fifa soccer apk hileli points</p>
-<ul>
-<li>Use explosive sprint to accelerate past defenders and create space for yourself or your teammates.</li>
-<li>Use finesse shots to curl the ball around the goalkeeper and score from tight angles.</li>
-<li>Use creative runs to control where your teammates run and create more options for passing or shooting.</li>
-<li>Use adaptive right stick switching to switch between defenders quickly and easily.</li>
-</ul>
-<h2>What are the Risks of FIFA APK Hile?</h2>
-<p>FIFA APK Hile may sound like a great way to play FIFA Mobile, but it also comes with some risks that you should be aware of. Some of these risks are:</p>
-<ul>
-<li>You may violate the terms of service of EA Sports and get banned from playing FIFA Mobile or other EA games.</li>
-<li>You may lose your progress and data if you uninstall FIFA APK Hile or update it to a newer version.</li>
-<li>You may expose your device to malware or viruses that may harm your device or steal your personal information.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>FIFA APK Hile is a modified version of FIFA Mobile that gives you unlimited coins and gems to play the game without any limitations. It has many features, benefits, and tips that make it more enjoyable and exciting than the original game. However, it also has some risks that you should consider before using it. If you want to try FIFA APK Hile, you can download it from this link: [FIFA Mobile APK Para Hilesi (2022) Sınırsız Para - Websesi] and follow the instructions in this article. Have fun playing FIFA Mobile with unlimited coins and gems!</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about FIFA APK Hile:</p>
-<ol>
-<li>What is FIFA APK Hile?</li>
-<p>FIFA APK Hile is a modified version of FIFA Mobile that gives you unlimited coins and gems to buy players, packs, and upgrades in the game.</p>
-<li>How to download and install FIFA APK Hile?</li>
-<p>You can download FIFA APK Hile from this link: [FIFA Mobile APK Para Hilesi (2022) Sınırsız Para - Websesi] and install it on your Android device by following these steps: - Go to this link: [FIFA Mobile APK Para Hilesi (2022) Sınırsız Para - Websesi] and click on the download button. - Wait for the download to finish and locate the FIFA APK Hile file on your device. - Tap on the file and allow the installation from unknown sources if prompted. - Wait for the installation to complete and launch the game. - Enjoy playing FIFA Mobile with unlimited coins and gems.</p>
-<li>How to use FIFA APK Hile?</li>
-<p>You can use FIFA APK Hile to buy anything you want in the game, such as players, packs, and upgrades. You can also unlock all the modes, features, and events in the game. Here are some examples of how to use FIFA APK Hile: - To buy players, go to the market and search for the player you want. You can filter by name, rating, position, league, nation, or team. Then, tap on the player and buy him with coins or gems. - To buy packs, go to the store and choose the pack you want. You can buy premium packs, special packs, or event packs with coins or gems. Then, open the pack and see what players and items you get. - To upgrade your team, go to the team management and select the player you want to upgrade. You can upgrade his skills, chemistry, or rank with coins or gems. You can also train him with other players or items.</p>
-<li>What are the features of FIFA APK Hile?</li>
-<p>FIFA APK Hile has many features that make it better than the original FIFA Mobile. Some of these features are: - New gameplay technology that makes the game more realistic, responsive, and fluid. - New modes such as Volta Football, Career Mode, Ultimate Team, and Champions League. - New players such as Messi, Ronaldo, Neymar, Mbappe, and Haaland. - New graphics that enhance the visual quality of the game.</p>
-<li>What are the tips and tricks for FIFA APK Hile?</li>
-<p>FIFA APK Hile is a fun and easy game to play, but there are some tips and tricks that can help you improve your skills and performance. Here are some of them: - Use explosive sprint to accelerate past defenders and create space for yourself or your teammates. - Use finesse shots to curl the ball around the goalkeeper and score from tight angles. - Use creative runs to control where your teammates run and create more options for passing or shooting. - Use adaptive right stick switching to switch between defenders quickly and easily.</p>
-<li>What are the risks of FIFA APK Hile?</li>
-<p>FIFA APK Hile may sound like a great way to play FIFA Mobile, but it also comes with some risks that you should be aware of. Some of these risks are: - You may violate the terms of service of EA Sports and get banned from playing FIFA Mobile or other EA games. - You may lose your progress and data if you uninstall FIFA APK Hile or update it to a newer version. - You may expose your device to malware or viruses that may harm your device or steal your personal information.</p>
-</ol></p> 401be4b1e0<br />
-<br />
-<br />
spaces/44ov41za8i/FreeVC/utils.py
DELETED
@@ -1,305 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-from scipy.io.wavfile import read
-import torch
-from torch.nn import functional as F
-from commons import sequence_mask
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def get_cmodel(rank):
-    checkpoint = torch.load('wavlm/WavLM-Large.pt')
-    cfg = WavLMConfig(checkpoint['cfg'])
-    cmodel = WavLM(cfg).cuda(rank)
-    cmodel.load_state_dict(checkpoint['model'])
-    cmodel.eval()
-    return cmodel
-
-
-def get_content(cmodel, y):
-    with torch.no_grad():
-        c = cmodel.extract_features(y.squeeze(1))[0]
-    c = c.transpose(1, 2)
-    return c
-
-
-def get_vocoder(rank):
-    with open("hifigan/config.json", "r") as f:
-        config = json.load(f)
-    config = hifigan.AttrDict(config)
-    vocoder = hifigan.Generator(config)
-    ckpt = torch.load("hifigan/generator_v1")
-    vocoder.load_state_dict(ckpt["generator"])
-    vocoder.eval()
-    vocoder.remove_weight_norm()
-    vocoder.cuda(rank)
-    return vocoder
-
-
-def transform(mel, height):  # 68-92
-    #r = np.random.random()
-    #rate = r * 0.3 + 0.85 # 0.85-1.15
-    #height = int(mel.size(-2) * rate)
-    tgt = torchvision.transforms.functional.resize(mel, (height, mel.size(-1)))
-    if height >= mel.size(-2):
-        return tgt[:, :mel.size(-2), :]
-    else:
-        silence = tgt[:, -1:, :].repeat(1, mel.size(-2) - height, 1)
-        silence += torch.randn_like(silence) / 10
-        return torch.cat((tgt, silence), 1)
-
-
-def stretch(mel, width):  # 0.5-2
-    return torchvision.transforms.functional.resize(mel, (mel.size(-2), width))
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
-    assert os.path.isfile(checkpoint_path)
-    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    learning_rate = checkpoint_dict['learning_rate']
-    if optimizer is not None:
-        optimizer.load_state_dict(checkpoint_dict['optimizer'])
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    new_state_dict = {}
-    for k, v in state_dict.items():
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-        except:
-            logger.info("%s is not in the checkpoint" % k)
-            new_state_dict[k] = v
-    if hasattr(model, 'module'):
-        model.module.load_state_dict(new_state_dict)
-    else:
-        model.load_state_dict(new_state_dict)
-    logger.info("Loaded checkpoint '{}' (iteration {})".format(
-        checkpoint_path, iteration))
-    return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
-    logger.info("Saving model and optimizer state at iteration {} to {}".format(
-        iteration, checkpoint_path))
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    torch.save({'model': state_dict,
-                'iteration': iteration,
-                'optimizer': optimizer.state_dict(),
-                'learning_rate': learning_rate}, checkpoint_path)
-
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
-    for k, v in scalars.items():
-        writer.add_scalar(k, v, global_step)
-    for k, v in histograms.items():
-        writer.add_histogram(k, v, global_step)
-    for k, v in images.items():
-        writer.add_image(k, v, global_step, dataformats='HWC')
-    for k, v in audios.items():
-        writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
-    f_list = glob.glob(os.path.join(dir_path, regex))
-    f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
-    x = f_list[-1]
-    print(x)
-    return x
-
-
-def plot_spectrogram_to_numpy(spectrogram):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(10, 2))
-    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
-                   interpolation='none')
-    plt.colorbar(im, ax=ax)
-    plt.xlabel("Frames")
-    plt.ylabel("Channels")
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(6, 4))
-    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
-                   interpolation='none')
-    fig.colorbar(im, ax=ax)
-    xlabel = 'Decoder timestep'
-    if info is not None:
-        xlabel += '\n\n' + info
-    plt.xlabel(xlabel)
-    plt.ylabel('Encoder timestep')
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def load_wav_to_torch(full_path):
-    sampling_rate, data = read(full_path)
-    return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
-    with open(filename, encoding='utf-8') as f:
-        filepaths_and_text = [line.strip().split(split) for line in f]
-    return filepaths_and_text
-
-
-def get_hparams(init=True):
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
-                        help='JSON file for configuration')
-    parser.add_argument('-m', '--model', type=str, required=True,
-                        help='Model name')
-
-    args = parser.parse_args()
-    model_dir = os.path.join("./logs", args.model)
-
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-
-    config_path = args.config
-    config_save_path = os.path.join(model_dir, "config.json")
-    if init:
-        with open(config_path, "r") as f:
-            data = f.read()
-        with open(config_save_path, "w") as f:
-            f.write(data)
-    else:
-        with open(config_save_path, "r") as f:
-            data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_dir(model_dir):
-    config_save_path = os.path.join(model_dir, "config.json")
-    with open(config_save_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_file(config_path):
-    with open(config_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    return hparams
-
-
-def check_git_hash(model_dir):
-    source_dir = os.path.dirname(os.path.realpath(__file__))
-    if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
-            source_dir
-        ))
-        return
-
-    cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
-    path = os.path.join(model_dir, "githash")
-    if os.path.exists(path):
-        saved_hash = open(path).read()
-        if saved_hash != cur_hash:
-            logger.warn("git hash values are different. {}(saved) != {}(current)".format(
-                saved_hash[:8], cur_hash[:8]))
-    else:
-        open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
-    global logger
-    logger = logging.getLogger(os.path.basename(model_dir))
-    logger.setLevel(logging.DEBUG)
-
-    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-    h = logging.FileHandler(os.path.join(model_dir, filename))
-    h.setLevel(logging.DEBUG)
-    h.setFormatter(formatter)
-    logger.addHandler(h)
-    return logger
-
-
-class HParams():
-    def __init__(self, **kwargs):
-        for k, v in kwargs.items():
-            if type(v) == dict:
-                v = HParams(**v)
-            self[k] = v
-
-    def keys(self):
-        return self.__dict__.keys()
-
-    def items(self):
-        return self.__dict__.items()
-
-    def values(self):
-        return self.__dict__.values()
-
-    def __len__(self):
-        return len(self.__dict__)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        return setattr(self, key, value)
-
-    def __contains__(self, key):
-        return key in self.__dict__
-
-    def __repr__(self):
-        return self.__dict__.__repr__()

spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/3millions_pfc.py
DELETED
@@ -1,23 +0,0 @@
-from easydict import EasyDict as edict
-
-# configs for test speed
-
-config = edict()
-config.loss = "arcface"
-config.network = "r50"
-config.resume = False
-config.output = None
-config.embedding_size = 512
-config.sample_rate = 0.1
-config.fp16 = True
-config.momentum = 0.9
-config.weight_decay = 5e-4
-config.batch_size = 128
-config.lr = 0.1  # batch size is 512
-
-config.rec = "synthetic"
-config.num_classes = 300 * 10000
-config.num_epoch = 30
-config.warmup_epoch = -1
-config.decay_epoch = [10, 16, 22]
-config.val_targets = []

spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_os.py
DELETED
File without changes

spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/emotion/inference.py
DELETED
@@ -1,177 +0,0 @@
-from data_gen.tts.emotion.params_data import *
-from data_gen.tts.emotion.model import EmotionEncoder
-from data_gen.tts.emotion.audio import preprocess_wav   # We want to expose this function from here
-from matplotlib import cm
-from data_gen.tts.emotion import audio
-from pathlib import Path
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-
-_model = None  # type: EmotionEncoder
-_device = None  # type: torch.device
-
-
-def load_model(weights_fpath: Path, device=None):
-    """
-    Loads the model in memory. If this function is not explicitely called, it will be run on the
-    first call to embed_frames() with the default weights file.
-
-    :param weights_fpath: the path to saved model weights.
-    :param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). The
-    model will be loaded and will run on this device. Outputs will however always be on the cpu.
-    If None, will default to your GPU if it"s available, otherwise your CPU.
-    """
-    # TODO: I think the slow loading of the encoder might have something to do with the device it
-    # was saved on. Worth investigating.
-    global _model, _device
-    if device is None:
-        _device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    elif isinstance(device, str):
-        _device = torch.device(device)
-    _model = EmotionEncoder(_device, torch.device("cpu"))
-    checkpoint = torch.load(weights_fpath)
-    _model.load_state_dict(checkpoint["model_state"])
-    _model.eval()
-    print("Loaded encoder trained to step %d" % (checkpoint["step"]))
-
-
-def is_loaded():
-    return _model is not None
-
-
-def embed_frames_batch(frames_batch):
-    """
-    Computes embeddings for a batch of mel spectrogram.
-
-    :param frames_batch: a batch mel of spectrogram as a numpy array of float32 of shape
-    (batch_size, n_frames, n_channels)
-    :return: the embeddings as a numpy array of float32 of shape (batch_size, model_embedding_size)
-    """
-    if _model is None:
-        raise Exception("Model was not loaded. Call load_model() before inference.")
-
-    frames = torch.from_numpy(frames_batch).to(_device)
-    embed = _model.inference(frames).detach().cpu().numpy()
-    return embed
-
-
-def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames,
-                           min_pad_coverage=0.75, overlap=0.5):
-    """
-    Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain
-    partial utterances of <partial_utterance_n_frames> each. Both the waveform and the mel
-    spectrogram slices are returned, so as to make each partial utterance waveform correspond to
-    its spectrogram. This function assumes that the mel spectrogram parameters used are those
-    defined in params_data.py.
-
-    The returned ranges may be indexing further than the length of the waveform. It is
-    recommended that you pad the waveform with zeros up to wave_slices[-1].stop.
-
-    :param n_samples: the number of samples in the waveform
-    :param partial_utterance_n_frames: the number of mel spectrogram frames in each partial
-    utterance
-    :param min_pad_coverage: when reaching the last partial utterance, it may or may not have
-    enough frames. If at least <min_pad_coverage> of <partial_utterance_n_frames> are present,
-    then the last partial utterance will be considered, as if we padded the audio. Otherwise,
-    it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial
-    utterance, this parameter is ignored so that the function always returns at least 1 slice.
-    :param overlap: by how much the partial utterance should overlap. If set to 0, the partial
-    utterances are entirely disjoint.
-    :return: the waveform slices and mel spectrogram slices as lists of array slices. Index
-    respectively the waveform and the mel spectrogram with these slices to obtain the partial
-    utterances.
-    """
-    assert 0 <= overlap < 1
-    assert 0 < min_pad_coverage <= 1
-
-    samples_per_frame = int((sampling_rate * mel_window_step / 1000))
-    n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
-    frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1)
-
-    # Compute the slices
-    wav_slices, mel_slices = [], []
-    steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1)
-    for i in range(0, steps, frame_step):
-        mel_range = np.array([i, i + partial_utterance_n_frames])
-        wav_range = mel_range * samples_per_frame
-        mel_slices.append(slice(*mel_range))
-        wav_slices.append(slice(*wav_range))
-
-    # Evaluate whether extra padding is warranted or not
-    last_wav_range = wav_slices[-1]
-    coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
-    if coverage < min_pad_coverage and len(mel_slices) > 1:
-        mel_slices = mel_slices[:-1]
-        wav_slices = wav_slices[:-1]
-
-    return wav_slices, mel_slices
-
-
-def embed_utterance(wav, using_partials=True, return_partials=False, **kwargs):
-    """
-    Computes an embedding for a single utterance.
-
-    # TODO: handle multiple wavs to benefit from batching on GPU
-    :param wav: a preprocessed (see audio.py) utterance waveform as a numpy array of float32
-    :param using_partials: if True, then the utterance is split in partial utterances of
-    <partial_utterance_n_frames> frames and the utterance embedding is computed from their
-    normalized average. If False, the utterance is instead computed from feeding the entire
-    spectogram to the network.
-    :param return_partials: if True, the partial embeddings will also be returned along with the
-    wav slices that correspond to the partial embeddings.
-    :param kwargs: additional arguments to compute_partial_splits()
-    :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If
-    <return_partials> is True, the partial utterances as a numpy array of float32 of shape
-    (n_partials, model_embedding_size) and the wav partials as a list of slices will also be
-    returned. If <using_partials> is simultaneously set to False, both these values will be None
-    instead.
-    """
-    # Process the entire utterance if not using partials
-    if not using_partials:
-        frames = audio.wav_to_mel_spectrogram(wav)
-        embed = embed_frames_batch(frames[None, ...])[0]
-        if return_partials:
-            return embed, None, None
-        return embed
-
-    # Compute where to split the utterance into partials and pad if necessary
-    wave_slices, mel_slices = compute_partial_slices(len(wav), **kwargs)
-    max_wave_length = wave_slices[-1].stop
-    if max_wave_length >= len(wav):
-        wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
-
-    # Split the utterance into partials
-    frames = audio.wav_to_mel_spectrogram(wav)
-    frames_batch = np.array([frames[s] for s in mel_slices])
-    partial_embeds = embed_frames_batch(frames_batch)
-
-    # Compute the utterance embedding from the partial embeddings
-    raw_embed = np.mean(partial_embeds, axis=0)
-    embed = raw_embed / np.linalg.norm(raw_embed, 2)
-
-    if return_partials:
-        return embed, partial_embeds, wave_slices
-    return embed
-
-
-def embed_speaker(wavs, **kwargs):
-    raise NotImplemented()
-
-
-def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)):
-    if ax is None:
-        ax = plt.gca()
-
-    if shape is None:
-        height = int(np.sqrt(len(embed)))
-        shape = (height, -1)
-    embed = embed.reshape(shape)
-
-    cmap = cm.get_cmap()
-    mappable = ax.imshow(embed, cmap=cmap)
-    cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04)
-    cbar.set_clim(*color_range)
-
-    ax.set_xticks([]), ax.set_yticks([])
-    ax.set_title(title)

spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/diffusionmodules/openaimodel.py
DELETED
@@ -1,963 +0,0 @@
-from abc import abstractmethod
-from functools import partial
-import math
-from typing import Iterable
-
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ldm.modules.diffusionmodules.util import (
-    checkpoint,
-    conv_nd,
-    linear,
-    avg_pool_nd,
-    zero_module,
-    normalization,
-    timestep_embedding,
-)
-from ldm.modules.attention import SpatialTransformer
-
-
-# dummy replace
-def convert_module_to_f16(x):
-    pass
-
-def convert_module_to_f32(x):
-    pass
-
-
-## go
-class AttentionPool2d(nn.Module):
-    """
-    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
-    """
-
-    def __init__(
-        self,
-        spacial_dim: int,
-        embed_dim: int,
-        num_heads_channels: int,
-        output_dim: int = None,
-    ):
-        super().__init__()
-        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
-        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
-        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
-        self.num_heads = embed_dim // num_heads_channels
-        self.attention = QKVAttention(self.num_heads)
-
-    def forward(self, x):
-        b, c, *_spatial = x.shape
-        x = x.reshape(b, c, -1)  # NC(HW)
-        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
-        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
-        x = self.qkv_proj(x)
-        x = self.attention(x)
-        x = self.c_proj(x)
-        return x[:, :, 0]
-
-
-class TimestepBlock(nn.Module):
-    """
-    Any module where forward() takes timestep embeddings as a second argument.
-    """
-
-    @abstractmethod
-    def forward(self, x, emb):
-        """
-        Apply the module to `x` given `emb` timestep embeddings.
-        """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
-    """
-    A sequential module that passes timestep embeddings to the children that
-    support it as an extra input.
-    """
-
-    def forward(self, x, emb, context=None):
-        for layer in self:
-            if isinstance(layer, TimestepBlock):
-                x = layer(x, emb)
-            elif isinstance(layer, SpatialTransformer):
-                x = layer(x, context)
-            else:
-                x = layer(x)
-        return x
-
-
-class Upsample(nn.Module):
-    """
-    An upsampling layer with an optional convolution.
-    :param channels: channels in the inputs and outputs.
-    :param use_conv: a bool determining if a convolution is applied.
-    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
-        upsampling occurs in the inner-two dimensions.
-    """
-
-    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
-        super().__init__()
-        self.channels = channels
-        self.out_channels = out_channels or channels
-        self.use_conv = use_conv
-        self.dims = dims
-        if use_conv:
-            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
-
-    def forward(self, x):
-        assert x.shape[1] == self.channels
-        if self.dims == 3:
-            x = F.interpolate(
-                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
-            )
-        else:
-            x = F.interpolate(x, scale_factor=2, mode="nearest")
-        if self.use_conv:
-            x = self.conv(x)
-        return x
-
-class TransposedUpsample(nn.Module):
-    'Learned 2x upsampling without padding'
-    def __init__(self, channels, out_channels=None, ks=5):
-        super().__init__()
-        self.channels = channels
-        self.out_channels = out_channels or channels
-
-        self.up = nn.ConvTranspose2d(self.channels, self.out_channels, kernel_size=ks, stride=2)
-
-    def forward(self, x):
-        return self.up(x)
-
-
-class Downsample(nn.Module):
-    """
-    A downsampling layer with an optional convolution.
-    :param channels: channels in the inputs and outputs.
-    :param use_conv: a bool determining if a convolution is applied.
-    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
-        downsampling occurs in the inner-two dimensions.
-    """
-
-    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
-        super().__init__()
-        self.channels = channels
-        self.out_channels = out_channels or channels
-        self.use_conv = use_conv
-        self.dims = dims
-        stride = 2 if dims != 3 else (1, 2, 2)
-        if use_conv:
-            self.op = conv_nd(
-                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
-            )
-        else:
-            assert self.channels == self.out_channels
-            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
-    def forward(self, x):
-        assert x.shape[1] == self.channels
-        return self.op(x)
-
-
-class ResBlock(TimestepBlock):
-    """
-    A residual block that can optionally change the number of channels.
-    :param channels: the number of input channels.
-    :param emb_channels: the number of timestep embedding channels.
-    :param dropout: the rate of dropout.
-    :param out_channels: if specified, the number of out channels.
-    :param use_conv: if True and out_channels is specified, use a spatial
-        convolution instead of a smaller 1x1 convolution to change the
-        channels in the skip connection.
-    :param dims: determines if the signal is 1D, 2D, or 3D.
-    :param use_checkpoint: if True, use gradient checkpointing on this module.
-    :param up: if True, use this block for upsampling.
-    :param down: if True, use this block for downsampling.
-    """
-
-    def __init__(
-        self,
-        channels,
-        emb_channels,
-        dropout,
-        out_channels=None,
-        use_conv=False,
-        use_scale_shift_norm=False,
-        dims=2,
-        use_checkpoint=False,
-        up=False,
-        down=False,
-    ):
-        super().__init__()
-        self.channels = channels
-        self.emb_channels = emb_channels
-        self.dropout = dropout
-        self.out_channels = out_channels or channels
-        self.use_conv = use_conv
-        self.use_checkpoint = use_checkpoint
-        self.use_scale_shift_norm = use_scale_shift_norm
-
-        self.in_layers = nn.Sequential(
-            normalization(channels),
-            nn.SiLU(),
-            conv_nd(dims, channels, self.out_channels, 3, padding=1),
-        )
-
-        self.updown = up or down
-
-        if up:
-            self.h_upd = Upsample(channels, False, dims)
-            self.x_upd = Upsample(channels, False, dims)
-        elif down:
-            self.h_upd = Downsample(channels, False, dims)
-            self.x_upd = Downsample(channels, False, dims)
-        else:
-            self.h_upd = self.x_upd = nn.Identity()
-
-        self.emb_layers = nn.Sequential(
-            nn.SiLU(),
-            linear(
-                emb_channels,
-                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
-            ),
-        )
-        self.out_layers = nn.Sequential(
-            normalization(self.out_channels),
-            nn.SiLU(),
-            nn.Dropout(p=dropout),
-            zero_module(
-                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
-            ),
-        )
-
-        if self.out_channels == channels:
-            self.skip_connection = nn.Identity()
-        elif use_conv:
-            self.skip_connection = conv_nd(
-                dims, channels, self.out_channels, 3, padding=1
-            )
-        else:
-            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
-    def forward(self, x, emb):
-        """
-        Apply the block to a Tensor, conditioned on a timestep embedding.
-        :param x: an [N x C x ...] Tensor of features.
-        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
-        :return: an [N x C x ...] Tensor of outputs.
-        """
-        return checkpoint(
-            self._forward, (x, emb), self.parameters(), self.use_checkpoint
-        )
-
-
-    def _forward(self, x, emb):
-        if self.updown:
-            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
-            h = in_rest(x)
-            h = self.h_upd(h)
-            x = self.x_upd(x)
-            h = in_conv(h)
-        else:
-            h = self.in_layers(x)
-        emb_out = self.emb_layers(emb).type(h.dtype)
-        while len(emb_out.shape) < len(h.shape):
-            emb_out = emb_out[..., None]
-        if self.use_scale_shift_norm:
-            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
-            scale, shift = th.chunk(emb_out, 2, dim=1)
-            h = out_norm(h) * (1 + scale) + shift
-            h = out_rest(h)
-        else:
-            h = h + emb_out
-            h = self.out_layers(h)
-        return self.skip_connection(x) + h
-
-
-class AttentionBlock(nn.Module):
-    """
-    An attention block that allows spatial positions to attend to each other.
-    Originally ported from here, but adapted to the N-d case.
-    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
-    """
-
-    def __init__(
-        self,
-        channels,
-        num_heads=1,
-        num_head_channels=-1,
-        use_checkpoint=False,
-        use_new_attention_order=False,
-    ):
-        super().__init__()
-        self.channels = channels
-        if num_head_channels == -1:
-            self.num_heads = num_heads
-        else:
-            assert (
-                channels % num_head_channels == 0
-            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
-            self.num_heads = channels // num_head_channels
-        self.use_checkpoint = use_checkpoint
-        self.norm = normalization(channels)
-        self.qkv = conv_nd(1, channels, channels * 3, 1)
-        if use_new_attention_order:
-            # split qkv before split heads
-            self.attention = QKVAttention(self.num_heads)
-        else:
-            # split heads before split qkv
-            self.attention = QKVAttentionLegacy(self.num_heads)
-
-        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
-    def forward(self, x):
-        return checkpoint(self._forward, (x,), self.parameters(), True)  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
-        #return pt_checkpoint(self._forward, x)  # pytorch
-
-    def _forward(self, x):
-        b, c, *spatial = x.shape
-        x = x.reshape(b, c, -1)
-        qkv = self.qkv(self.norm(x))
-        h = self.attention(qkv)
-        h = self.proj_out(h)
-        return (x + h).reshape(b, c, *spatial)
-
-
-def count_flops_attn(model, _x, y):
-    """
-    A counter for the `thop` package to count the operations in an
-    attention operation.
-    Meant to be used like:
-        macs, params = thop.profile(
-            model,
-            inputs=(inputs, timestamps),
-            custom_ops={QKVAttention: QKVAttention.count_flops},
-        )
-    """
-    b, c, *spatial = y[0].shape
-    num_spatial = int(np.prod(spatial))
-    # We perform two matmuls with the same number of ops.
-    # The first computes the weight matrix, the second computes
-    # the combination of the value vectors.
-    matmul_ops = 2 * b * (num_spatial ** 2) * c
-    model.total_ops += th.DoubleTensor([matmul_ops])
-
-
-class QKVAttentionLegacy(nn.Module):
-    """
-    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
-    """
-
-    def __init__(self, n_heads):
-        super().__init__()
-        self.n_heads = n_heads
-
-    def forward(self, qkv):
-        """
-        Apply QKV attention.
-        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
-        :return: an [N x (H * C) x T] tensor after attention.
-        """
-        bs, width, length = qkv.shape
-        assert width % (3 * self.n_heads) == 0
-        ch = width // (3 * self.n_heads)
-        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
-        scale = 1 / math.sqrt(math.sqrt(ch))
-        weight = th.einsum(
-            "bct,bcs->bts", q * scale, k * scale
-        )  # More stable with f16 than dividing afterwards
-        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
-        a = th.einsum("bts,bcs->bct", weight, v)
-        return a.reshape(bs, -1, length)
-
-    @staticmethod
-    def count_flops(model, _x, y):
-        return count_flops_attn(model, _x, y)
-
-
-class QKVAttention(nn.Module):
-    """
-    A module which performs QKV attention and splits in a different order.
-    """
-
-    def __init__(self, n_heads):
-        super().__init__()
-        self.n_heads = n_heads
-
-    def forward(self, qkv):
-        """
-        Apply QKV attention.
-        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
-        :return: an [N x (H * C) x T] tensor after attention.
-        """
-        bs, width, length = qkv.shape
-        assert width % (3 * self.n_heads) == 0
-        ch = width // (3 * self.n_heads)
-        q, k, v = qkv.chunk(3, dim=1)
-        scale = 1 / math.sqrt(math.sqrt(ch))
-        weight = th.einsum(
-            "bct,bcs->bts",
-            (q * scale).view(bs * self.n_heads, ch, length),
-            (k * scale).view(bs * self.n_heads, ch, length),
-        )  # More stable with f16 than dividing afterwards
-        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
-        a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
-        return a.reshape(bs, -1, length)
-
-    @staticmethod
-    def count_flops(model, _x, y):
-        return count_flops_attn(model, _x, y)
-
-
-class UNetModel(nn.Module):
-    """
-    The full UNet model with attention and timestep embedding.
-    :param in_channels: channels in the input Tensor.
-    :param model_channels: base channel count for the model.
-    :param out_channels: channels in the output Tensor.
-    :param num_res_blocks: number of residual blocks per downsample.
-    :param attention_resolutions: a collection of downsample rates at which
-        attention will take place. May be a set, list, or tuple.
-        For example, if this contains 4, then at 4x downsampling, attention
-        will be used.
-    :param dropout: the dropout probability.
-    :param channel_mult: channel multiplier for each level of the UNet.
-    :param conv_resample: if True, use learned convolutions for upsampling and
-        downsampling.
-    :param dims: determines if the signal is 1D, 2D, or 3D.
-    :param num_classes: if specified (as an int), then this model will be
-        class-conditional with `num_classes` classes.
-    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
-    :param num_heads: the number of attention heads in each attention layer.
-    :param num_heads_channels: if specified, ignore num_heads and instead use
-        a fixed channel width per attention head.
-    :param num_heads_upsample: works with num_heads to set a different number
-        of heads for upsampling. Deprecated.
-    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
-    :param resblock_updown: use residual blocks for up/downsampling.
-    :param use_new_attention_order: use a different attention pattern for potentially
-        increased efficiency.
-    """
-
-    def __init__(
-        self,
-        image_size,
-        in_channels,
-        model_channels,
-        out_channels,
-        num_res_blocks,
-        attention_resolutions,
-        dropout=0,
-        channel_mult=(1, 2, 4, 8),
-        conv_resample=True,
-        dims=2,
-        num_classes=None,
-        use_checkpoint=False,
-        use_fp16=False,
-        num_heads=-1,
-        num_head_channels=-1,
-        num_heads_upsample=-1,
-        use_scale_shift_norm=False,
-        resblock_updown=False,
-        use_new_attention_order=False,
-        use_spatial_transformer=False,  # custom transformer support
-        transformer_depth=1,  # custom transformer support
-        context_dim=None,  # custom transformer support
-        n_embed=None,  # custom support for prediction of discrete ids into codebook of first stage vq model
-        legacy=True,
-    ):
-        super().__init__()
-        if use_spatial_transformer:
-            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
-
-        if context_dim is not None:
-            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
-            from omegaconf.listconfig import ListConfig
-            if type(context_dim) == ListConfig:
-                context_dim = list(context_dim)
-
-        if num_heads_upsample == -1:
-            num_heads_upsample = num_heads
-
-        if num_heads == -1:
-            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
-
-        if num_head_channels == -1:
-            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
-
-        self.image_size = image_size
-        self.in_channels = in_channels
-        self.model_channels = model_channels
-        self.out_channels = out_channels
-        self.num_res_blocks = num_res_blocks
-        self.attention_resolutions = attention_resolutions
-        self.dropout = dropout
-        self.channel_mult = channel_mult
-        self.conv_resample = conv_resample
-        self.num_classes = num_classes
-        self.use_checkpoint = use_checkpoint
-        self.dtype = th.float16 if use_fp16 else th.float32
-        self.num_heads = num_heads
-        self.num_head_channels = num_head_channels
-        self.num_heads_upsample = num_heads_upsample
-        self.predict_codebook_ids = n_embed is not None
-
-        time_embed_dim = model_channels * 4
-        self.time_embed = nn.Sequential(
-            linear(model_channels, time_embed_dim),
-            nn.SiLU(),
-            linear(time_embed_dim, time_embed_dim),
-        )
-
-        if self.num_classes is not None:
-            self.label_emb = nn.Embedding(num_classes, time_embed_dim)
-
-        self.input_blocks = nn.ModuleList(
-            [
-                TimestepEmbedSequential(
-                    conv_nd(dims, in_channels, model_channels, 3, padding=1)  # conv2d for txt2img/audio
-                )
-            ]
-        )
-        self._feature_size = model_channels
-        input_block_chans = [model_channels]
-        ch = model_channels
-        ds = 1
-        # downsample blocks
-        for level, mult in enumerate(channel_mult):
-            for _ in range(num_res_blocks):
-                layers = [
-                    ResBlock(
-                        ch,
-                        time_embed_dim,
-                        dropout,
-                        out_channels=mult * model_channels,
-                        dims=dims,
-                        use_checkpoint=use_checkpoint,
-                        use_scale_shift_norm=use_scale_shift_norm,
-                    )
-                ]
-                ch = mult * model_channels
-                if ds in attention_resolutions:
-                    if num_head_channels == -1:
-                        dim_head = ch // num_heads
-                    else:
-                        num_heads = ch // num_head_channels
-                        dim_head = num_head_channels
-                    if legacy:
-                        #num_heads = 1
-                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-                    layers.append(
-                        AttentionBlock(
-                            ch,
-                            use_checkpoint=use_checkpoint,
-                            num_heads=num_heads,
-                            num_head_channels=dim_head,
-                            use_new_attention_order=use_new_attention_order,
-                        ) if not use_spatial_transformer else SpatialTransformer(  # transformer_depth is 1
-                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
-                        )
-                    )
-                self.input_blocks.append(TimestepEmbedSequential(*layers))
-                self._feature_size += ch
-                input_block_chans.append(ch)
-            if level != len(channel_mult) - 1:
-                out_ch = ch
-                self.input_blocks.append(
-                    TimestepEmbedSequential(
-                        ResBlock(
-                            ch,
-                            time_embed_dim,
-                            dropout,
-                            out_channels=out_ch,
-                            dims=dims,
-                            use_checkpoint=use_checkpoint,
-                            use_scale_shift_norm=use_scale_shift_norm,
-                            down=True,
-                        )
-                        if resblock_updown
-                        else Downsample(
-                            ch, conv_resample, dims=dims, out_channels=out_ch
-                        )
-                    )
-                )
-                ch = out_ch
-                input_block_chans.append(ch)
-                ds *= 2
-                self._feature_size += ch
-
-        if num_head_channels == -1:
-            dim_head = ch // num_heads
-        else:
-            num_heads = ch // num_head_channels
-            dim_head = num_head_channels
-        if legacy:
-            #num_heads = 1
-            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-        self.middle_block = TimestepEmbedSequential(
-            ResBlock(
-                ch,
-                time_embed_dim,
-                dropout,
-                dims=dims,
-                use_checkpoint=use_checkpoint,
-                use_scale_shift_norm=use_scale_shift_norm,
-            ),
-            AttentionBlock(
-                ch,
-                use_checkpoint=use_checkpoint,
-                num_heads=num_heads,
-                num_head_channels=dim_head,
-                use_new_attention_order=use_new_attention_order,
-            ) if not use_spatial_transformer else SpatialTransformer(
-                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
-            ),
-            ResBlock(
-                ch,
-                time_embed_dim,
-                dropout,
-                dims=dims,
-                use_checkpoint=use_checkpoint,
-                use_scale_shift_norm=use_scale_shift_norm,
-            ),
-        )
-        self._feature_size += ch
-        # upsample blocks
-        self.output_blocks = nn.ModuleList([])
-        for level, mult in list(enumerate(channel_mult))[::-1]:
-            for i in range(num_res_blocks + 1):
-                ich = input_block_chans.pop()
-                layers = [
-                    ResBlock(
-                        ch + ich,
-                        time_embed_dim,
-                        dropout,
-                        out_channels=model_channels * mult,
-                        dims=dims,
-                        use_checkpoint=use_checkpoint,
-                        use_scale_shift_norm=use_scale_shift_norm,
-                    )
-                ]
-                ch = model_channels * mult
-                if ds in attention_resolutions:
-                    if num_head_channels == -1:
-                        dim_head = ch // num_heads
-                    else:
-                        num_heads = ch // num_head_channels
-                        dim_head = num_head_channels
-                    if legacy:
-                        #num_heads = 1
-                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-                    layers.append(
-                        AttentionBlock(
-                            ch,
-                            use_checkpoint=use_checkpoint,
-                            num_heads=num_heads_upsample,
-                            num_head_channels=dim_head,
-                            use_new_attention_order=use_new_attention_order,
-                        ) if not use_spatial_transformer else SpatialTransformer(
-                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
-                        )
-                    )
-                if level and i == num_res_blocks:
-                    out_ch = ch
-                    layers.append(
-                        ResBlock(
-                            ch,
-                            time_embed_dim,
-                            dropout,
-                            out_channels=out_ch,
-                            dims=dims,
-                            use_checkpoint=use_checkpoint,
-                            use_scale_shift_norm=use_scale_shift_norm,
-                            up=True,
-                        )
-                        if resblock_updown
-                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
-                    )
-                    ds //= 2
-                self.output_blocks.append(TimestepEmbedSequential(*layers))
-                self._feature_size += ch
-
-        self.out = nn.Sequential(
-            normalization(ch),
-            nn.SiLU(),
-            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
-        )
-        if self.predict_codebook_ids:
-            self.id_predictor = nn.Sequential(
-                normalization(ch),
-                conv_nd(dims, model_channels, n_embed, 1),
-                #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
-            )
-
-    def convert_to_fp16(self):
-        """
-        Convert the torso of the model to float16.
-        """
-        self.input_blocks.apply(convert_module_to_f16)
-        self.middle_block.apply(convert_module_to_f16)
-        self.output_blocks.apply(convert_module_to_f16)
-
-    def convert_to_fp32(self):
-        """
-        Convert the torso of the model to float32.
-        """
-        self.input_blocks.apply(convert_module_to_f32)
-        self.middle_block.apply(convert_module_to_f32)
-        self.output_blocks.apply(convert_module_to_f32)
-
-    def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
-        """
-        Apply the model to an input batch.
-        :param x: an [N x C x ...] Tensor of inputs.
-        :param timesteps: a 1-D batch of timesteps, shape [N]
-        :param context: conditioning plugged in via crossattn. for txt2img shape is [N,77,context_dim]
-        :param y: an [N] Tensor of labels, if class-conditional.
-        :return: an [N x C x ...] Tensor of outputs.
-        """
-        # print(f"in unet {x.shape}")
-        assert (y is not None) == (
-            self.num_classes is not None
-        ), "must specify y if and only if the model is class-conditional"
-        hs = []
-        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)  # shape [N,self.model_channels]
-        emb = self.time_embed(t_emb)  # shape [N,context_dim]
-
-        if self.num_classes is not None:  # only for class label
-            assert y.shape == (x.shape[0],)
-            emb = emb + self.label_emb(y)
-
-        h = x.type(self.dtype)  # [N,C,10,106]
-        for module in self.input_blocks:
-            h = module(h, emb, context)  # 0:[N,self.model_channels,10,106],1:[N,self.model_channels,10,106],2:[N,self.model_channels,10,106] 3:[N,self.model_channels,5,53] 4:[N,self.model_channels,5,53] 5:[N,self.model_channels*2,5,53]
-            hs.append(h)
-        h = self.middle_block(h, emb, context)  # no shape change
-        for module in self.output_blocks:
-            h = th.cat([h, hs.pop()], dim=1)  # here the channel dim doubles (or grows by self.model_channels); the other dims are unchanged
-            h = module(h, emb, context)  # here the channel dim halves back to its previous size; h, w unchanged or *2
-        h = h.type(x.dtype)  # at this point h has the same shape as the input x
-        if self.predict_codebook_ids:
-            return self.id_predictor(h)
-        else:
-            return self.out(h)
-
-
-class EncoderUNetModel(nn.Module):
-    """
-    The half UNet model with attention and timestep embedding.
-    For usage, see UNet.
-    """
-
-    def __init__(
-        self,
-        image_size,
-        in_channels,
-        model_channels,
-        out_channels,
-        num_res_blocks,
-        attention_resolutions,
-        dropout=0,
-        channel_mult=(1, 2, 4, 8),
-        conv_resample=True,
-        dims=2,
-        use_checkpoint=False,
-        use_fp16=False,
-        num_heads=1,
-        num_head_channels=-1,
-        num_heads_upsample=-1,
-        use_scale_shift_norm=False,
-        resblock_updown=False,
-        use_new_attention_order=False,
-        pool="adaptive",
-        *args,
-        **kwargs
-    ):
-        super().__init__()
-
-        if num_heads_upsample == -1:
-            num_heads_upsample = num_heads
-
-        self.in_channels = in_channels
-        self.model_channels = model_channels
-        self.out_channels = out_channels
-        self.num_res_blocks = num_res_blocks
-        self.attention_resolutions = attention_resolutions
-        self.dropout = dropout
-        self.channel_mult = channel_mult
-        self.conv_resample = conv_resample
-        self.use_checkpoint = use_checkpoint
-        self.dtype = th.float16 if use_fp16 else th.float32
-        self.num_heads = num_heads
-        self.num_head_channels = num_head_channels
-        self.num_heads_upsample = num_heads_upsample
-
-        time_embed_dim = model_channels * 4
-        self.time_embed = nn.Sequential(
-            linear(model_channels, time_embed_dim),
-            nn.SiLU(),
-            linear(time_embed_dim, time_embed_dim),
-        )
-
-        self.input_blocks = nn.ModuleList(
-            [
-                TimestepEmbedSequential(
-                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
-                )
-            ]
-        )
-        self._feature_size = model_channels
-        input_block_chans = [model_channels]
-        ch = model_channels
-        ds = 1
-        for level, mult in enumerate(channel_mult):
-            for _ in range(num_res_blocks):
-                layers = [
-                    ResBlock(
-                        ch,
-                        time_embed_dim,
-                        dropout,
-                        out_channels=mult * model_channels,
-                        dims=dims,
-                        use_checkpoint=use_checkpoint,
-                        use_scale_shift_norm=use_scale_shift_norm,
-                    )
-                ]
-                ch = mult * model_channels
-                if ds in attention_resolutions:
-                    layers.append(
-                        AttentionBlock(
-                            ch,
-                            use_checkpoint=use_checkpoint,
-                            num_heads=num_heads,
-                            num_head_channels=num_head_channels,
-                            use_new_attention_order=use_new_attention_order,
-                        )
-                    )
-                self.input_blocks.append(TimestepEmbedSequential(*layers))
-                self._feature_size += ch
-                input_block_chans.append(ch)
-            if level != len(channel_mult) - 1:
-                out_ch = ch
-                self.input_blocks.append(
-                    TimestepEmbedSequential(
-                        ResBlock(
-                            ch,
-                            time_embed_dim,
-                            dropout,
-                            out_channels=out_ch,
-                            dims=dims,
-                            use_checkpoint=use_checkpoint,
-                            use_scale_shift_norm=use_scale_shift_norm,
-                            down=True,
-                        )
-                        if resblock_updown
-                        else Downsample(
-                            ch, conv_resample, dims=dims, out_channels=out_ch
-                        )
-                    )
-                )
-                ch = out_ch
-                input_block_chans.append(ch)
-                ds *= 2
-                self._feature_size += ch
-
-        self.middle_block = TimestepEmbedSequential(
-            ResBlock(
-                ch,
-                time_embed_dim,
-                dropout,
-                dims=dims,
-                use_checkpoint=use_checkpoint,
-                use_scale_shift_norm=use_scale_shift_norm,
-            ),
-            AttentionBlock(
-                ch,
-                use_checkpoint=use_checkpoint,
-                num_heads=num_heads,
-                num_head_channels=num_head_channels,
-                use_new_attention_order=use_new_attention_order,
-            ),
-            ResBlock(
-                ch,
-                time_embed_dim,
-                dropout,
-                dims=dims,
-                use_checkpoint=use_checkpoint,
-                use_scale_shift_norm=use_scale_shift_norm,
-            ),
-        )
-        self._feature_size += ch
-        self.pool = pool
-        if pool == "adaptive":
-            self.out = nn.Sequential(
-                normalization(ch),
-                nn.SiLU(),
-                nn.AdaptiveAvgPool2d((1, 1)),
-                zero_module(conv_nd(dims, ch, out_channels, 1)),
-                nn.Flatten(),
-            )
-        elif pool == "attention":
-            assert num_head_channels != -1
-            self.out = nn.Sequential(
-                normalization(ch),
-                nn.SiLU(),
-                AttentionPool2d(
-                    (image_size // ds), ch, num_head_channels, out_channels
-                ),
-            )
-        elif pool == "spatial":
-            self.out = nn.Sequential(
-                nn.Linear(self._feature_size, 2048),
-                nn.ReLU(),
-                nn.Linear(2048, self.out_channels),
-            )
-        elif pool == "spatial_v2":
-            self.out = nn.Sequential(
-                nn.Linear(self._feature_size, 2048),
-                normalization(2048),
-                nn.SiLU(),
-                nn.Linear(2048, self.out_channels),
-            )
-        else:
-            raise NotImplementedError(f"Unexpected {pool} pooling")
-
-    def convert_to_fp16(self):
-        """
-        Convert the torso of the model to float16.
-        """
-        self.input_blocks.apply(convert_module_to_f16)
-        self.middle_block.apply(convert_module_to_f16)
-
-    def convert_to_fp32(self):
-        """
-        Convert the torso of the model to float32.
-        """
-        self.input_blocks.apply(convert_module_to_f32)
-        self.middle_block.apply(convert_module_to_f32)
-
-    def forward(self, x, timesteps):
-        """
-        Apply the model to an input batch.
-        :param x: an [N x C x ...] Tensor of inputs.
-        :param timesteps: a 1-D batch of timesteps.
-        :return: an [N x K] Tensor of outputs.
-        """
-        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
-        results = []
-        h = x.type(self.dtype)
-        for module in self.input_blocks:
-            h = module(h, emb)
-            if self.pool.startswith("spatial"):
-                results.append(h.type(x.dtype).mean(dim=(2, 3)))
-        h = self.middle_block(h, emb)
-        if self.pool.startswith("spatial"):
-            results.append(h.type(x.dtype).mean(dim=(2, 3)))
-            h = th.cat(results, axis=-1)
-            return self.out(h)
-        else:
-            h = h.type(x.dtype)
-            return self.out(h)
-

spaces/AILab-CVC/EvalCrafter/test.py
DELETED
File without changes
spaces/AchyuthGamer/OpenGPT/g4f/Provider/AiService.py
DELETED
@@ -1,36 +0,0 @@
-from __future__ import annotations
-
-import requests
-
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
-
-
-class AiService(BaseProvider):
-    url = "https://aiservice.vercel.app/"
-    working = False
-    supports_gpt_35_turbo = True
-
-    @staticmethod
-    def create_completion(
-        model: str,
-        messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
-        base += "\nassistant: "
-
-        headers = {
-            "accept": "*/*",
-            "content-type": "text/plain;charset=UTF-8",
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "Referer": "https://aiservice.vercel.app/chat",
-        }
-        data = {"input": base}
-        url = "https://aiservice.vercel.app/api/chat/answer"
-        response = requests.post(url, headers=headers, json=data)
-        response.raise_for_status()
-        yield response.json()["data"]
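
The provider flattens the chat history into one role-prefixed string before posting it. Since the endpoint is dead (`working = False`), only the string-handling step is worth sketching in isolation:

# Sketch of the prompt flattening used above; messages follow the
# OpenAI-style [{"role": ..., "content": ...}] convention.
messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi!"},
]
base = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
base += "\nassistant: "
print(base)
# system: You are helpful.
# user: Hi!
# assistant:
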
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Bing.py
DELETED
@@ -1,300 +0,0 @@
-from __future__ import annotations
-
-import random
-import uuid
-import json
-import os
-import urllib.parse
-from aiohttp import ClientSession, ClientTimeout
-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider
-
-class Tones():
-    creative = "Creative"
-    balanced = "Balanced"
-    precise = "Precise"
-
-default_cookies = {
-    'SRCHD'         : 'AF=NOFORM',
-    'PPLState'      : '1',
-    'KievRPSSecAuth': '',
-    'SUID'          : '',
-    'SRCHUSR'       : '',
-    'SRCHHPGUSR'    : '',
-}
-
-class Bing(AsyncGeneratorProvider):
-    url = "https://bing.com/chat"
-    working = True
-    supports_gpt_4 = True
-
-    @staticmethod
-    def create_async_generator(
-        model: str,
-        messages: list[dict[str, str]],
-        cookies: dict = None,
-        tone: str = Tones.creative,
-        **kwargs
-    ) -> AsyncGenerator:
-        if len(messages) < 2:
-            prompt = messages[0]["content"]
-            context = None
-        else:
-            prompt = messages[-1]["content"]
-            context = create_context(messages[:-1])
-
-        if not cookies or "SRCHD" not in cookies:
-            cookies = default_cookies
-        return stream_generate(prompt, tone, context, cookies)
-
-def create_context(messages: list[dict[str, str]]):
-    context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
-    return context
-
-class Conversation():
-    def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None:
-        self.conversationId = conversationId
-        self.clientId = clientId
-        self.conversationSignature = conversationSignature
-
-async def create_conversation(session: ClientSession) -> Conversation:
-    url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1150.3'
-
-    async with await session.get(url) as response:
-        data = await response.json()
-
-    conversationId = data.get('conversationId')
-    clientId = data.get('clientId')
-    conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
-
-    if not conversationId or not clientId or not conversationSignature:
-        raise Exception('Failed to create conversation.')
-
-    return Conversation(conversationId, clientId, conversationSignature)
-
-async def list_conversations(session: ClientSession) -> list:
-    url = "https://www.bing.com/turing/conversation/chats"
-    async with session.get(url) as response:
-        response = await response.json()
-        return response["chats"]
-
-async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
-    url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
-    json = {
-        "conversationId": conversation.conversationId,
-        "conversationSignature": conversation.conversationSignature,
-        "participant": {"id": conversation.clientId},
-        "source": "cib",
-        "optionsSets": ["autosave"]
-    }
-    async with session.post(url, json=json) as response:
-        response = await response.json()
-        return response["result"]["value"] == "Success"
-
-class Defaults:
-    delimiter = "\x1e"
-    ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
-
-    allowedMessageTypes = [
-        "Chat",
-        "Disengaged",
-        "AdsQuery",
-        "SemanticSerp",
-        "GenerateContentQuery",
-        "SearchQuery",
-        "ActionRequest",
-        "Context",
-        "Progress",
-        "AdsQuery",
-        "SemanticSerp",
-    ]
-
-    sliceIds = [
-        "winmuid3tf",
-        "osbsdusgreccf",
-        "ttstmout",
-        "crchatrev",
-        "winlongmsgtf",
-        "ctrlworkpay",
-        "norespwtf",
-        "tempcacheread",
-        "temptacache",
-        "505scss0",
-        "508jbcars0",
-        "515enbotdets0",
-        "5082tsports",
-        "515vaoprvs",
-        "424dagslnv1s0",
-        "kcimgattcf",
-        "427startpms0",
-    ]
-
-    location = {
-        "locale": "en-US",
-        "market": "en-US",
-        "region": "US",
-        "locationHints": [
-            {
-                "country": "United States",
-                "state": "California",
-                "city": "Los Angeles",
-                "timezoneoffset": 8,
-                "countryConfidence": 8,
-                "Center": {"Latitude": 34.0536909, "Longitude": -118.242766},
-                "RegionType": 2,
-                "SourceType": 1,
-            }
-        ],
-    }
-
-    headers = {
-        'accept': '*/*',
-        'accept-language': 'en-US,en;q=0.9',
-        'cache-control': 'max-age=0',
-        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-        'sec-ch-ua-arch': '"x86"',
-        'sec-ch-ua-bitness': '"64"',
-        'sec-ch-ua-full-version': '"110.0.1587.69"',
-        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-model': '""',
-        'sec-ch-ua-platform': '"Windows"',
-        'sec-ch-ua-platform-version': '"15.0.0"',
-        'sec-fetch-dest': 'document',
-        'sec-fetch-mode': 'navigate',
-        'sec-fetch-site': 'none',
-        'sec-fetch-user': '?1',
-        'upgrade-insecure-requests': '1',
-        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
-        'x-edge-shopping-flag': '1',
-        'x-forwarded-for': ip_address,
-    }
-
-    optionsSets = [
-        'saharasugg',
-        'enablenewsfc',
-        'clgalileo',
-        'gencontentv3',
-        "nlu_direct_response_filter",
-        "deepleo",
-        "disable_emoji_spoken_text",
-        "responsible_ai_policy_235",
-        "enablemm",
-        "h3precise",
-        "dtappid",
-        "cricinfo",
-        "cricinfov2",
-        "dv3sugg",
-        "nojbfedge"
-    ]
-
-def format_message(msg: dict) -> str:
-    return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
-
-def create_message(conversation: Conversation, prompt: str, tone: str, context: str=None) -> str:
-    request_id = str(uuid.uuid4())
-    struct = {
-        'arguments': [
-            {
-                'source': 'cib',
-                'optionsSets': Defaults.optionsSets,
-                'allowedMessageTypes': Defaults.allowedMessageTypes,
-                'sliceIds': Defaults.sliceIds,
-                'traceId': os.urandom(16).hex(),
-                'isStartOfSession': True,
-                'requestId': request_id,
-                'message': Defaults.location | {
-                    'author': 'user',
-                    'inputMethod': 'Keyboard',
-                    'text': prompt,
-                    'messageType': 'Chat',
-                    'requestId': request_id,
-                    'messageId': request_id,
-                },
-                'tone': tone,
-                'spokenTextMode': 'None',
-                'conversationId': conversation.conversationId,
-                'participant': {
-                    'id': conversation.clientId
-                },
-            }
-        ],
-        'invocationId': '1',
-        'target': 'chat',
-        'type': 4
-    }
-
-    if context:
-        struct['arguments'][0]['previousMessages'] = [{
-            "author": "user",
-            "description": context,
-            "contextType": "WebPage",
-            "messageType": "Context",
-            "messageId": "discover-web--page-ping-mriduna-----"
-        }]
-    return format_message(struct)
-
-async def stream_generate(
-    prompt: str,
-    tone: str,
-    context: str=None,
-    cookies: dict=None,
-):
-    async with ClientSession(
-        timeout=ClientTimeout(total=900),
-        cookies=cookies,
-        headers=Defaults.headers,
-    ) as session:
-        conversation = await create_conversation(session)
-        try:
-            async with session.ws_connect(
-                'wss://sydney.bing.com/sydney/ChatHub',
-                autoping=False,
-                params={'sec_access_token': conversation.conversationSignature}
-            ) as wss:
-
-                await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
-                await wss.receive(timeout=900)
-                await wss.send_str(create_message(conversation, prompt, tone, context))
-
-                response_txt = ''
-                returned_text = ''
-                final = False
-
-                while not final:
-                    msg = await wss.receive(timeout=900)
-                    objects = msg.data.split(Defaults.delimiter)
-                    for obj in objects:
-                        if obj is None or not obj:
-                            continue
-
-                        response = json.loads(obj)
-                        if response.get('type') == 1 and response['arguments'][0].get('messages'):
-                            message = response['arguments'][0]['messages'][0]
-                            if (message['contentOrigin'] != 'Apology'):
-                                if 'adaptiveCards' in message:
-                                    card = message['adaptiveCards'][0]['body'][0]
-                                    if "text" in card:
-                                        response_txt = card.get('text')
-                                    if message.get('messageType'):
-                                        inline_txt = card['inlines'][0].get('text')
-                                        response_txt += inline_txt + '\n'
-                                elif message.get('contentType') == "IMAGE":
-                                    query = urllib.parse.quote(message.get('text'))
-                                    url = f"\nhttps://www.bing.com/images/create?q={query}"
-                                    response_txt += url
-                                    final = True
-                                if response_txt.startswith(returned_text):
-                                    new = response_txt[len(returned_text):]
-                                    if new != "\n":
-                                        yield new
-                                    returned_text = response_txt
-                        elif response.get('type') == 2:
-                            result = response['item']['result']
-                            if result.get('error'):
-                                raise Exception(f"{result['value']}: {result['message']}")
-                            return
-        finally:
-            await delete_conversation(session, conversation)
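
The ChatHub socket frames every JSON payload with the `\x1e` record separator, which is why `stream_generate` splits each read on `Defaults.delimiter`. A self-contained sketch of that framing and parsing, with nothing Bing-specific beyond the delimiter:

import json

DELIMITER = "\x1e"  # ASCII record separator used by the SignalR-style protocol

def frame(msg: dict) -> str:
    """Serialize one message and append the record separator."""
    return json.dumps(msg, ensure_ascii=False) + DELIMITER

def parse_frames(data: str) -> list:
    """Split one raw socket read into individual JSON messages."""
    return [json.loads(obj) for obj in data.split(DELIMITER) if obj]

raw = frame({"protocol": "json", "version": 1}) + frame({"type": 6})
print(parse_frames(raw))  # [{'protocol': 'json', 'version': 1}, {'type': 6}]
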
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/toonifypipeline.d.ts
DELETED
@@ -1,2 +0,0 @@
-import ToonifyPostFxPipeline from './shaders/toonify/ToonifyPostFxPipeline';
-export default ToonifyPostFxPipeline;
spaces/Akim/claudeAPI/README.md
DELETED
@@ -1,11 +0,0 @@
----
-title: ClaudeAPI
-emoji: 😻
-colorFrom: indigo
-colorTo: blue
-sdk: docker
-pinned: false
-license: unknown
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/cantonese.py
DELETED
@@ -1,59 +0,0 @@
-import re
-import cn2an
-import opencc
-
-
-converter = opencc.OpenCC('jyutjyu')
-
-# List of (Latin alphabet, ipa) pairs:
-_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
-    ('A', 'ei˥'),
-    ('B', 'biː˥'),
-    ('C', 'siː˥'),
-    ('D', 'tiː˥'),
-    ('E', 'iː˥'),
-    ('F', 'e˥fuː˨˩'),
-    ('G', 'tsiː˥'),
-    ('H', 'ɪk̚˥tsʰyː˨˩'),
-    ('I', 'ɐi˥'),
-    ('J', 'tsei˥'),
-    ('K', 'kʰei˥'),
-    ('L', 'e˥llou˨˩'),
-    ('M', 'ɛːm˥'),
-    ('N', 'ɛːn˥'),
-    ('O', 'ou˥'),
-    ('P', 'pʰiː˥'),
-    ('Q', 'kʰiːu˥'),
-    ('R', 'aː˥lou˨˩'),
-    ('S', 'ɛː˥siː˨˩'),
-    ('T', 'tʰiː˥'),
-    ('U', 'juː˥'),
-    ('V', 'wiː˥'),
-    ('W', 'tʊk̚˥piː˥juː˥'),
-    ('X', 'ɪk̚˥siː˨˩'),
-    ('Y', 'waːi˥'),
-    ('Z', 'iː˨sɛːt̚˥')
-]]
-
-
-def number_to_cantonese(text):
-    return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text)
-
-
-def latin_to_ipa(text):
-    for regex, replacement in _latin_to_ipa:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def cantonese_to_ipa(text):
-    text = number_to_cantonese(text.upper())
-    text = converter.convert(text).replace('-', '').replace('$', ' ')
-    text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group()) + ' ', text)
-    text = re.sub(r'[、;:]', ',', text)
-    text = re.sub(r'\s*,\s*', ', ', text)
-    text = re.sub(r'\s*。\s*', '. ', text)
-    text = re.sub(r'\s*?\s*', '? ', text)
-    text = re.sub(r'\s*!\s*', '! ', text)
-    text = re.sub(r'\s*$', '', text)
-    return text
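
The numeral step above depends only on `cn2an`; the rest of the pipeline additionally needs OpenCC's `jyutjyu` conversion dictionary installed. A quick check of just the numeral rewriting (the example sentence is illustrative):

import re
import cn2an

def number_to_cantonese(text):
    # Same regex as above: integers and decimals become Chinese numerals.
    return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text)

print(number_to_cantonese("我有2個"))  # digits rewritten by cn2an, e.g. 我有二個
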
spaces/AlhitawiMohammed22/E2E_OCR/app.py
DELETED
@@ -1,178 +0,0 @@
-import logging
-import time
-from pathlib import Path
-import contextlib
-
-logging.basicConfig(
-    level=logging.INFO,
-    format="%(asctime)s - %(levelname)s - %(message)s",
-)
-
-
-import gradio as gr
-import nltk
-import torch
-from det2rec import *
-
-_here = Path(__file__).parent
-
-nltk.download("stopwords")  # TODO=find where this requirement originates from
-
-
-def load_uploaded_file(file_obj, temp_dir: Path = None):
-    """
-    load_uploaded_file - process an uploaded file
-    Args:
-        file_obj (POTENTIALLY list): Gradio file object inside a list
-    Returns:
-        str, the uploaded file contents
-    """
-
-    # check if mysterious file object is a list
-    if isinstance(file_obj, list):
-        file_obj = file_obj[0]
-    file_path = Path(file_obj.name)
-
-    if temp_dir is None:
-        _temp_dir = _here / "temp"
-        _temp_dir.mkdir(exist_ok=True)
-
-    try:
-        pdf_bytes_obj = open(file_path, "rb").read()
-        temp_path = temp_dir / file_path.name if temp_dir else file_path
-        # save to PDF file
-        with open(temp_path, "wb") as f:
-            f.write(pdf_bytes_obj)
-        logging.info(f"The uploaded file saved to {temp_path}")
-        return str(temp_path.resolve())
-
-    except Exception as e:
-        logging.error(f"Trying to load file with path {file_path}, error: {e}")
-        print(f"Trying to load file with path {file_path}, error: {e}")
-        return None
-
-
-def convert_PDF(
-    pdf_obj,
-    language: str = "en",
-    max_pages=20,
-):
-    """
-    convert_PDF - convert a PDF file to text
-    Args:
-        pdf_bytes_obj (bytes): PDF file contents
-        language (str, optional): Language to use for OCR. Defaults to "en".
-    Returns:
-        str, the PDF file contents as text
-    """
-    # clear local text cache
-    rm_local_text_files()
-    global ocr_model
-    st = time.perf_counter()
-    if isinstance(pdf_obj, list):
-        pdf_obj = pdf_obj[0]
-    file_path = Path(pdf_obj.name)
-    if not file_path.suffix == ".pdf":
-        logging.error(f"File {file_path} is not a PDF file")
-
-        html_error = f"""
-        <div style="color: red; font-size: 20px; font-weight: bold;">
-        File {file_path} is not a PDF file. Please upload a PDF file.
-        </div>
-        """
-        return "File is not a PDF file", html_error, None
-
-    conversion_stats = convert_PDF_to_Text(
-        file_path,
-        ocr_model=ocr_model,
-        max_pages=max_pages,
-    )
-    converted_txt = conversion_stats["converted_text"]
-    num_pages = conversion_stats["num_pages"]
-    was_truncated = conversion_stats["truncated"]
-    # if alt_lang: # TODO: fix this
-
-    rt = round((time.perf_counter() - st) / 60, 2)
-    print(f"Runtime: {rt} minutes")
-    html = ""
-    if was_truncated:
-        html += f"<p>WARNING - PDF was truncated to {max_pages} pages</p>"
-    html += f"<p>Runtime: {rt} minutes on CPU for {num_pages} pages</p>"
-
-    _output_name = f"RESULT_{file_path.stem}_OCR.txt"
-    with open(_output_name, "w", encoding="utf-8", errors="ignore") as f:
-        f.write(converted_txt)
-
-    return converted_txt, html, _output_name
-
-
-if __name__ == "__main__":
-    logging.info("Starting app")
-
-    use_GPU = torch.cuda.is_available()
-    logging.info(f"Using GPU status: {use_GPU}")
-    logging.info("Loading OCR model")
-    with contextlib.redirect_stdout(None):
-        ocr_model = ocr_predictor(
-            "db_resnet50",
-            "crnn_mobilenet_v3_large",
-            pretrained=True,
-            assume_straight_pages=True,
-        )
-
-    # define pdf bytes as None
-    pdf_obj = _here / "exampler.pdf"
-    pdf_obj = str(pdf_obj.resolve())
-    _temp_dir = _here / "temp"
-    _temp_dir.mkdir(exist_ok=True)
-
-    logging.info("starting demo")
-    demo = gr.Blocks()
-
-    with demo:
-
-        gr.Markdown("# PDF to Text")
-        gr.Markdown(
-            "A basic demo for end-to-end text detection and recognition where the input will be in pdf format and the result is text conversion using OCR from the [doctr](https://mindee.github.io/doctr/index.html) package"
-        )
-        gr.Markdown("---")
-        gr.Markdown("---")
-
-        with gr.Column():
-
-            gr.Markdown("## Load Inputs")
-            gr.Markdown("Upload your own file & replace the default. Files should be < 10MB to avoid upload issues - search for a PDF compressor online as needed.")
-            gr.Markdown(
-                "_If no file is uploaded, a sample PDF will be used. PDFs are truncated to 20 pages._"
-            )
-
-            uploaded_file = gr.File(
-                label="Upload a PDF file",
-                file_count="single",
-                type="file",
-                value=_here / "exampler.pdf",
-            )
-
-        gr.Markdown("---")
-
-        with gr.Column():
-            gr.Markdown("## Convert PDF to Text")
-            convert_button = gr.Button("Convert PDF!", variant="primary")
-            out_placeholder = gr.HTML("<p><em>Output will appear below:</em></p>")
-            gr.Markdown("### Output")
-            OCR_text = gr.Textbox(
-                label="OCR Result", placeholder="The OCR text will appear here"
-            )
-            text_file = gr.File(
-                label="Download Text File",
-                file_count="single",
-                type="file",
-                interactive=False,
-            )
-
-        convert_button.click(
-            fn=convert_PDF,
-            inputs=[uploaded_file],
-            outputs=[OCR_text, out_placeholder, text_file],
-        )
-    demo.launch(enable_queue=True)
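
For reference, the doctr entry point used here (`ocr_predictor`) can be exercised outside Gradio roughly like this. A sketch assuming doctr is installed; `sample.pdf` is a placeholder path:

from doctr.io import DocumentFile
from doctr.models import ocr_predictor

# Same detection/recognition pair as in the app above.
model = ocr_predictor("db_resnet50", "crnn_mobilenet_v3_large", pretrained=True)

pages = DocumentFile.from_pdf("sample.pdf")  # placeholder path
result = model(pages)
print(result.render())  # plain-text rendering of the OCR output
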
spaces/AliUsama98/Aliusama_spellchecker/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Aliusama Spellchecker
-emoji: 👀
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.50.2
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Alpaca233/ChatPDF-GUI/gpt_reader/prompt.py
DELETED
@@ -1,26 +0,0 @@
-BASE_POINTS = """
-1. Who are the authors?
-2. What is the process of the proposed method?
-3. What is the performance of the proposed method? Please note down its performance metrics.
-4. What are the baseline models and their performances? Please note down these baseline methods.
-5. What dataset did this paper use?
-"""
-
-READING_PROMPT = """
-You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n
-Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n
-When you are reading, You need to focus on these key points:{}
-"""
-
-READING_PROMT_V2 = """
-You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n
-Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n
-When you are reading, You need to focus on these key points:{},
-
-And You need to generate a brief but informative title for this part.
-Your return format:
-- title: '...'
-- summary: '...'
-"""
-
-SUMMARY_PROMPT = "You are a researcher helper bot. Now you need to read the summaries of a research paper."
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/dataset.py
DELETED
@@ -1,124 +0,0 @@
-import numbers
-import os
-import queue as Queue
-import threading
-
-import mxnet as mx
-import numpy as np
-import torch
-from torch.utils.data import DataLoader, Dataset
-from torchvision import transforms
-
-
-class BackgroundGenerator(threading.Thread):
-    def __init__(self, generator, local_rank, max_prefetch=6):
-        super(BackgroundGenerator, self).__init__()
-        self.queue = Queue.Queue(max_prefetch)
-        self.generator = generator
-        self.local_rank = local_rank
-        self.daemon = True
-        self.start()
-
-    def run(self):
-        torch.cuda.set_device(self.local_rank)
-        for item in self.generator:
-            self.queue.put(item)
-        self.queue.put(None)
-
-    def next(self):
-        next_item = self.queue.get()
-        if next_item is None:
-            raise StopIteration
-        return next_item
-
-    def __next__(self):
-        return self.next()
-
-    def __iter__(self):
-        return self
-
-
-class DataLoaderX(DataLoader):
-
-    def __init__(self, local_rank, **kwargs):
-        super(DataLoaderX, self).__init__(**kwargs)
-        self.stream = torch.cuda.Stream(local_rank)
-        self.local_rank = local_rank
-
-    def __iter__(self):
-        self.iter = super(DataLoaderX, self).__iter__()
-        self.iter = BackgroundGenerator(self.iter, self.local_rank)
-        self.preload()
-        return self
-
-    def preload(self):
-        self.batch = next(self.iter, None)
-        if self.batch is None:
-            return None
-        with torch.cuda.stream(self.stream):
-            for k in range(len(self.batch)):
-                self.batch[k] = self.batch[k].to(device=self.local_rank, non_blocking=True)
-
-    def __next__(self):
-        torch.cuda.current_stream().wait_stream(self.stream)
-        batch = self.batch
-        if batch is None:
-            raise StopIteration
-        self.preload()
-        return batch
-
-
-class MXFaceDataset(Dataset):
-    def __init__(self, root_dir, local_rank):
-        super(MXFaceDataset, self).__init__()
-        self.transform = transforms.Compose(
-            [transforms.ToPILImage(),
-             transforms.RandomHorizontalFlip(),
-             transforms.ToTensor(),
-             transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-             ])
-        self.root_dir = root_dir
-        self.local_rank = local_rank
-        path_imgrec = os.path.join(root_dir, 'train.rec')
-        path_imgidx = os.path.join(root_dir, 'train.idx')
-        self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
-        s = self.imgrec.read_idx(0)
-        header, _ = mx.recordio.unpack(s)
-        if header.flag > 0:
-            self.header0 = (int(header.label[0]), int(header.label[1]))
-            self.imgidx = np.array(range(1, int(header.label[0])))
-        else:
-            self.imgidx = np.array(list(self.imgrec.keys))
-
-    def __getitem__(self, index):
-        idx = self.imgidx[index]
-        s = self.imgrec.read_idx(idx)
-        header, img = mx.recordio.unpack(s)
-        label = header.label
-        if not isinstance(label, numbers.Number):
-            label = label[0]
-        label = torch.tensor(label, dtype=torch.long)
-        sample = mx.image.imdecode(img).asnumpy()
-        if self.transform is not None:
-            sample = self.transform(sample)
-        return sample, label
-
-    def __len__(self):
-        return len(self.imgidx)
-
-
-class SyntheticDataset(Dataset):
-    def __init__(self, local_rank):
-        super(SyntheticDataset, self).__init__()
-        img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
-        img = np.transpose(img, (2, 0, 1))
-        img = torch.from_numpy(img).squeeze(0).float()
-        img = ((img / 255) - 0.5) / 0.5
-        self.img = img
-        self.label = 1
-
-    def __getitem__(self, index):
-        return self.img, self.label
-
-    def __len__(self):
-        return 1000000
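
The `BackgroundGenerator` above is a generic producer thread around a bounded queue; stripped of the CUDA device pinning, the pattern reduces to this stdlib-only sketch:

import queue
import threading

def prefetch(generator, max_prefetch=6):
    """Run `generator` in a daemon thread, yielding items from a bounded queue."""
    q = queue.Queue(max_prefetch)

    def worker():
        for item in generator:
            q.put(item)
        q.put(None)  # sentinel: producer exhausted

    threading.Thread(target=worker, daemon=True).start()
    while (item := q.get()) is not None:
        yield item

# The consumer overlaps its own work with the producer's I/O:
for batch in prefetch(range(5)):
    print(batch)
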
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/partial_fc.py
DELETED
@@ -1,222 +0,0 @@
-import logging
-import os
-
-import torch
-import torch.distributed as dist
-from torch.nn import Module
-from torch.nn.functional import normalize, linear
-from torch.nn.parameter import Parameter
-
-
-class PartialFC(Module):
-    """
-    Author: {Xiang An, Yang Xiao, XuHan Zhu} in DeepGlint,
-    Partial FC: Training 10 Million Identities on a Single Machine
-    See the original paper:
-    https://arxiv.org/abs/2010.05222
-    """
-
-    @torch.no_grad()
-    def __init__(self, rank, local_rank, world_size, batch_size, resume,
-                 margin_softmax, num_classes, sample_rate=1.0, embedding_size=512, prefix="./"):
-        """
-        rank: int
-            Unique process(GPU) ID from 0 to world_size - 1.
-        local_rank: int
-            Unique process(GPU) ID within the server from 0 to 7.
-        world_size: int
-            Number of GPU.
-        batch_size: int
-            Batch size on current rank(GPU).
-        resume: bool
-            Select whether to restore the weight of softmax.
-        margin_softmax: callable
-            A function of margin softmax, eg: cosface, arcface.
-        num_classes: int
-            The number of class center storage in current rank(CPU/GPU), usually is total_classes // world_size,
-            required.
-        sample_rate: float
-            The partial fc sampling rate, when the number of classes increases to more than 2 millions, Sampling
-            can greatly speed up training, and reduce a lot of GPU memory, default is 1.0.
-        embedding_size: int
-            The feature dimension, default is 512.
-        prefix: str
-            Path for save checkpoint, default is './'.
-        """
-        super(PartialFC, self).__init__()
-        #
-        self.num_classes: int = num_classes
-        self.rank: int = rank
-        self.local_rank: int = local_rank
-        self.device: torch.device = torch.device("cuda:{}".format(self.local_rank))
-        self.world_size: int = world_size
-        self.batch_size: int = batch_size
-        self.margin_softmax: callable = margin_softmax
-        self.sample_rate: float = sample_rate
-        self.embedding_size: int = embedding_size
-        self.prefix: str = prefix
-        self.num_local: int = num_classes // world_size + int(rank < num_classes % world_size)
-        self.class_start: int = num_classes // world_size * rank + min(rank, num_classes % world_size)
-        self.num_sample: int = int(self.sample_rate * self.num_local)
-
-        self.weight_name = os.path.join(self.prefix, "rank_{}_softmax_weight.pt".format(self.rank))
-        self.weight_mom_name = os.path.join(self.prefix, "rank_{}_softmax_weight_mom.pt".format(self.rank))
-
-        if resume:
-            try:
-                self.weight: torch.Tensor = torch.load(self.weight_name)
-                self.weight_mom: torch.Tensor = torch.load(self.weight_mom_name)
-                if self.weight.shape[0] != self.num_local or self.weight_mom.shape[0] != self.num_local:
-                    raise IndexError
-                logging.info("softmax weight resume successfully!")
-                logging.info("softmax weight mom resume successfully!")
-            except (FileNotFoundError, KeyError, IndexError):
-                self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
-                self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
-                logging.info("softmax weight init!")
-                logging.info("softmax weight mom init!")
-        else:
-            self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
-            self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
-            logging.info("softmax weight init successfully!")
-            logging.info("softmax weight mom init successfully!")
-        self.stream: torch.cuda.Stream = torch.cuda.Stream(local_rank)
-
-        self.index = None
-        if int(self.sample_rate) == 1:
-            self.update = lambda: 0
-            self.sub_weight = Parameter(self.weight)
-            self.sub_weight_mom = self.weight_mom
-        else:
-            self.sub_weight = Parameter(torch.empty((0, 0)).cuda(local_rank))
-
-    def save_params(self):
-        """ Save softmax weight for each rank on prefix
-        """
-        torch.save(self.weight.data, self.weight_name)
-        torch.save(self.weight_mom, self.weight_mom_name)
-
-    @torch.no_grad()
-    def sample(self, total_label):
-        """
-        Sample all positive class centers in each rank, and random select neg class centers to filling a fixed
-        `num_sample`.
-
-        total_label: tensor
-            Label after all gather, which cross all GPUs.
-        """
-        index_positive = (self.class_start <= total_label) & (total_label < self.class_start + self.num_local)
-        total_label[~index_positive] = -1
-        total_label[index_positive] -= self.class_start
-        if int(self.sample_rate) != 1:
-            positive = torch.unique(total_label[index_positive], sorted=True)
-            if self.num_sample - positive.size(0) >= 0:
-                perm = torch.rand(size=[self.num_local], device=self.device)
-                perm[positive] = 2.0
-                index = torch.topk(perm, k=self.num_sample)[1]
-                index = index.sort()[0]
-            else:
-                index = positive
-            self.index = index
-            total_label[index_positive] = torch.searchsorted(index, total_label[index_positive])
-            self.sub_weight = Parameter(self.weight[index])
-            self.sub_weight_mom = self.weight_mom[index]
-
-    def forward(self, total_features, norm_weight):
-        """ Partial fc forward, `logits = X * sample(W)`
-        """
-        torch.cuda.current_stream().wait_stream(self.stream)
-        logits = linear(total_features, norm_weight)
-        return logits
-
-    @torch.no_grad()
-    def update(self):
-        """ Set updated weight and weight_mom to memory bank.
-        """
-        self.weight_mom[self.index] = self.sub_weight_mom
-        self.weight[self.index] = self.sub_weight
-
-    def prepare(self, label, optimizer):
-        """
-        get sampled class centers for cal softmax.
-
-        label: tensor
-            Label tensor on each rank.
-        optimizer: opt
-            Optimizer for partial fc, which need to get weight mom.
-        """
-        with torch.cuda.stream(self.stream):
-            total_label = torch.zeros(
-                size=[self.batch_size * self.world_size], device=self.device, dtype=torch.long)
-            dist.all_gather(list(total_label.chunk(self.world_size, dim=0)), label)
-            self.sample(total_label)
-            optimizer.state.pop(optimizer.param_groups[-1]['params'][0], None)
-            optimizer.param_groups[-1]['params'][0] = self.sub_weight
-            optimizer.state[self.sub_weight]['momentum_buffer'] = self.sub_weight_mom
-        norm_weight = normalize(self.sub_weight)
-        return total_label, norm_weight
-
-    def forward_backward(self, label, features, optimizer):
-        """
-        Partial fc forward and backward with model parallel
-
-        label: tensor
-            Label tensor on each rank(GPU)
-        features: tensor
-            Features tensor on each rank(GPU)
-        optimizer: optimizer
-            Optimizer for partial fc
-
-        Returns:
-        --------
-        x_grad: tensor
-            The gradient of features.
-        loss_v: tensor
-            Loss value for cross entropy.
-        """
-        total_label, norm_weight = self.prepare(label, optimizer)
-        total_features = torch.zeros(
-            size=[self.batch_size * self.world_size, self.embedding_size], device=self.device)
-        dist.all_gather(list(total_features.chunk(self.world_size, dim=0)), features.data)
-        total_features.requires_grad = True
-
-        logits = self.forward(total_features, norm_weight)
-        logits = self.margin_softmax(logits, total_label)
-
-        with torch.no_grad():
-            max_fc = torch.max(logits, dim=1, keepdim=True)[0]
-            dist.all_reduce(max_fc, dist.ReduceOp.MAX)
-
-            # calculate exp(logits) and all-reduce
-            logits_exp = torch.exp(logits - max_fc)
-            logits_sum_exp = logits_exp.sum(dim=1, keepdims=True)
-            dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)
-
-            # calculate prob
-            logits_exp.div_(logits_sum_exp)
-
-            # get one-hot
-            grad = logits_exp
-            index = torch.where(total_label != -1)[0]
-            one_hot = torch.zeros(size=[index.size()[0], grad.size()[1]], device=grad.device)
-            one_hot.scatter_(1, total_label[index, None], 1)
-
-            # calculate loss
-            loss = torch.zeros(grad.size()[0], 1, device=grad.device)
-            loss[index] = grad[index].gather(1, total_label[index, None])
-            dist.all_reduce(loss, dist.ReduceOp.SUM)
-            loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)
-
-            # calculate grad
-            grad[index] -= one_hot
-            grad.div_(self.batch_size * self.world_size)
-
-        logits.backward(grad)
-        if total_features.grad is not None:
-            total_features.grad.detach_()
-        x_grad: torch.Tensor = torch.zeros_like(features, requires_grad=True)
-        # feature gradient all-reduce
-        dist.reduce_scatter(x_grad, list(total_features.grad.chunk(self.world_size, dim=0)))
-        x_grad = x_grad * self.world_size
-        # backward backbone
-        return x_grad, loss_v
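
The partition arithmetic in `__init__` (`num_local`, `class_start`) splits `num_classes` softmax centers across ranks as evenly as possible, giving the first `num_classes % world_size` ranks one extra center. A quick worked check of those two formulas:

def partition(num_classes, world_size):
    """Reproduce PartialFC's per-rank split of the class centers."""
    for rank in range(world_size):
        num_local = num_classes // world_size + int(rank < num_classes % world_size)
        class_start = num_classes // world_size * rank + min(rank, num_classes % world_size)
        print(f"rank {rank}: classes [{class_start}, {class_start + num_local})")

partition(10, 3)
# rank 0: classes [0, 4)
# rank 1: classes [4, 7)
# rank 2: classes [7, 10)
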
spaces/Amrrs/DragGan-Inversion/training/loss.py
DELETED
@@ -1,159 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Loss functions."""
-
-import numpy as np
-import torch
-from torch_utils import training_stats
-from torch_utils.ops import conv2d_gradfix
-from torch_utils.ops import upfirdn2d
-
-# ----------------------------------------------------------------------------
-
-
-class Loss:
-    # to be overridden by subclass
-    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg):
-        raise NotImplementedError()
-
-# ----------------------------------------------------------------------------
-
-
-class StyleGAN2Loss(Loss):
-    def __init__(self, device, G, D, augment_pipe=None, r1_gamma=10, style_mixing_prob=0, pl_weight=0, pl_batch_shrink=2, pl_decay=0.01, pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0):
-        super().__init__()
-        self.device = device
-        self.G = G
-        self.D = D
-        self.augment_pipe = augment_pipe
-        self.r1_gamma = r1_gamma
-        self.style_mixing_prob = style_mixing_prob
-        self.pl_weight = pl_weight
-        self.pl_batch_shrink = pl_batch_shrink
-        self.pl_decay = pl_decay
-        self.pl_no_weight_grad = pl_no_weight_grad
-        self.pl_mean = torch.zeros([], device=device)
-        self.blur_init_sigma = blur_init_sigma
-        self.blur_fade_kimg = blur_fade_kimg
-
-    def run_G(self, z, c, update_emas=False):
-        ws = self.G.mapping(z, c, update_emas=update_emas)
-        if self.style_mixing_prob > 0:
-            with torch.autograd.profiler.record_function('style_mixing'):
-                cutoff = torch.empty([], dtype=torch.int64,
-                                     device=ws.device).random_(1, ws.shape[1])
-                cutoff = torch.where(torch.rand(
-                    [], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
-                ws[:, cutoff:] = self.G.mapping(
-                    torch.randn_like(z), c, update_emas=False)[:, cutoff:]
-        img = self.G.synthesis(ws, update_emas=update_emas)
-        return img, ws
-
-    def run_D(self, img, c, blur_sigma=0, update_emas=False):
-        blur_size = np.floor(blur_sigma * 3)
-        if blur_size > 0:
-            with torch.autograd.profiler.record_function('blur'):
-                f = torch.arange(-blur_size, blur_size + 1,
-                                 device=img.device).div(blur_sigma).square().neg().exp2()
-                img = upfirdn2d.filter2d(img, f / f.sum())
-        if self.augment_pipe is not None:
-            img = self.augment_pipe(img)
-        logits = self.D(img, c, update_emas=update_emas)
-        return logits
-
-    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg):
-        assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
-        if self.pl_weight == 0:
-            phase = {'Greg': 'none', 'Gboth': 'Gmain'}.get(phase, phase)
-        if self.r1_gamma == 0:
-            phase = {'Dreg': 'none', 'Dboth': 'Dmain'}.get(phase, phase)
-        blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), 0) * \
-            self.blur_init_sigma if self.blur_fade_kimg > 0 else 0
-
-        # Gmain: Maximize logits for generated images.
-        if phase in ['Gmain', 'Gboth']:
-            with torch.autograd.profiler.record_function('Gmain_forward'):
-                gen_img, _gen_ws = self.run_G(gen_z, gen_c)
-                gen_logits = self.run_D(gen_img, gen_c, blur_sigma=blur_sigma)
-                training_stats.report('Loss/scores/fake', gen_logits)
-                training_stats.report('Loss/signs/fake', gen_logits.sign())
-                # -log(sigmoid(gen_logits))
-                loss_Gmain = torch.nn.functional.softplus(-gen_logits)
-                training_stats.report('Loss/G/loss', loss_Gmain)
-            with torch.autograd.profiler.record_function('Gmain_backward'):
-                loss_Gmain.mean().mul(gain).backward()
-
-        # Gpl: Apply path length regularization.
-        if phase in ['Greg', 'Gboth']:
-            with torch.autograd.profiler.record_function('Gpl_forward'):
-                batch_size = gen_z.shape[0] // self.pl_batch_shrink
-                gen_img, gen_ws = self.run_G(
-                    gen_z[:batch_size], gen_c[:batch_size])
-                pl_noise = torch.randn_like(
-                    gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
-                with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients(self.pl_no_weight_grad):
-                    pl_grads = torch.autograd.grad(outputs=[(
-                        gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
-                pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
-                pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
-                self.pl_mean.copy_(pl_mean.detach())
-                pl_penalty = (pl_lengths - pl_mean).square()
-                training_stats.report('Loss/pl_penalty', pl_penalty)
-                loss_Gpl = pl_penalty * self.pl_weight
-                training_stats.report('Loss/G/reg', loss_Gpl)
-            with torch.autograd.profiler.record_function('Gpl_backward'):
-                loss_Gpl.mean().mul(gain).backward()
-
-        # Dmain: Minimize logits for generated images.
-        loss_Dgen = 0
-        if phase in ['Dmain', 'Dboth']:
-            with torch.autograd.profiler.record_function('Dgen_forward'):
-                gen_img, _gen_ws = self.run_G(gen_z, gen_c, update_emas=True)
-                gen_logits = self.run_D(
-                    gen_img, gen_c, blur_sigma=blur_sigma, update_emas=True)
-                training_stats.report('Loss/scores/fake', gen_logits)
-                training_stats.report('Loss/signs/fake', gen_logits.sign())
-                loss_Dgen = torch.nn.functional.softplus(
-                    gen_logits)  # -log(1 - sigmoid(gen_logits))
-            with torch.autograd.profiler.record_function('Dgen_backward'):
-                loss_Dgen.mean().mul(gain).backward()
-
-        # Dmain: Maximize logits for real images.
-        # Dr1: Apply R1 regularization.
-        if phase in ['Dmain', 'Dreg', 'Dboth']:
-            name = 'Dreal' if phase == 'Dmain' else 'Dr1' if phase == 'Dreg' else 'Dreal_Dr1'
-            with torch.autograd.profiler.record_function(name + '_forward'):
-                real_img_tmp = real_img.detach().requires_grad_(
-                    phase in ['Dreg', 'Dboth'])
-                real_logits = self.run_D(
-                    real_img_tmp, real_c, blur_sigma=blur_sigma)
-                training_stats.report('Loss/scores/real', real_logits)
-                training_stats.report('Loss/signs/real', real_logits.sign())
-
-                loss_Dreal = 0
-                if phase in ['Dmain', 'Dboth']:
-                    # -log(sigmoid(real_logits))
-                    loss_Dreal = torch.nn.functional.softplus(-real_logits)
-                    training_stats.report(
-                        'Loss/D/loss', loss_Dgen + loss_Dreal)
-
-                loss_Dr1 = 0
-                if phase in ['Dreg', 'Dboth']:
-                    with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
-                        r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[
-                            real_img_tmp], create_graph=True, only_inputs=True)[0]
-                    r1_penalty = r1_grads.square().sum([1, 2, 3])
-                    loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
-                    training_stats.report('Loss/r1_penalty', r1_penalty)
-                    training_stats.report('Loss/D/reg', loss_Dr1)
-
-            with torch.autograd.profiler.record_function(name + '_backward'):
-                (loss_Dreal + loss_Dr1).mean().mul(gain).backward()
-
-# ----------------------------------------------------------------------------
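
The generator and discriminator objectives above are the standard non-saturating logistic GAN losses, written with `softplus` for numerical stability since softplus(x) = log(1 + e^x). In isolation:

import torch
import torch.nn.functional as F

gen_logits = torch.randn(4)   # D(G(z)) scores on fakes
real_logits = torch.randn(4)  # D(x) scores on reals

loss_G     = F.softplus(-gen_logits)   # -log(sigmoid(D(G(z))))
loss_Dgen  = F.softplus(gen_logits)    # -log(1 - sigmoid(D(G(z))))
loss_Dreal = F.softplus(-real_logits)  # -log(sigmoid(D(x)))

# Identity check against the explicit form:
assert torch.allclose(loss_G, -torch.log(torch.sigmoid(gen_logits)), atol=1e-6)
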
spaces/Andy1621/uniformer_image_detection/configs/_base_/schedules/schedule_20e.py
DELETED
@@ -1,11 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
-    policy='step',
-    warmup='linear',
-    warmup_iters=500,
-    warmup_ratio=0.001,
-    step=[16, 19])
-runner = dict(type='EpochBasedRunner', max_epochs=20)
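
Under mmcv's `step` policy, the base LR is scaled by 0.1 at each listed epoch after the linear warmup. A sketch of the resulting per-epoch LR (warmup omitted, since it only covers the first 500 iterations):

def step_lr(base_lr, step, epoch, gamma=0.1):
    """LR after `epoch` epochs under mmcv's 'step' policy (warmup ignored)."""
    return base_lr * gamma ** sum(epoch >= s for s in step)

for e in (0, 15, 16, 18, 19):
    print(e, step_lr(0.02, [16, 19], e))
# 0.02 until epoch 16, then 0.002, then 0.0002 from epoch 19 (up to float rounding)
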
spaces/Andy1621/uniformer_image_detection/configs/swin/mask_rcnn_swin_small_patch4_window7_mstrain_480-800_adamw_3x_coco.py
DELETED
@@ -1,80 +0,0 @@
-_base_ = [
-    '../_base_/models/mask_rcnn_swin_fpn.py',
-    '../_base_/datasets/coco_instance.py',
-    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-
-model = dict(
-    backbone=dict(
-        embed_dim=96,
-        depths=[2, 2, 18, 2],
-        num_heads=[3, 6, 12, 24],
-        window_size=7,
-        ape=False,
-        drop_path_rate=0.2,
-        patch_norm=True,
-        use_checkpoint=False
-    ),
-    neck=dict(in_channels=[96, 192, 384, 768]))
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-# augmentation strategy originates from DETR / Sparse RCNN
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
-    dict(type='RandomFlip', flip_ratio=0.5),
-    dict(type='AutoAugment',
-         policies=[
-             [
-                 dict(type='Resize',
-                      img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
-                                 (608, 1333), (640, 1333), (672, 1333), (704, 1333),
-                                 (736, 1333), (768, 1333), (800, 1333)],
-                      multiscale_mode='value',
-                      keep_ratio=True)
-             ],
-             [
-                 dict(type='Resize',
-                      img_scale=[(400, 1333), (500, 1333), (600, 1333)],
-                      multiscale_mode='value',
-                      keep_ratio=True),
-                 dict(type='RandomCrop',
-                      crop_type='absolute_range',
-                      crop_size=(384, 600),
-                      allow_negative_crop=True),
-                 dict(type='Resize',
-                      img_scale=[(480, 1333), (512, 1333), (544, 1333),
-                                 (576, 1333), (608, 1333), (640, 1333),
-                                 (672, 1333), (704, 1333), (736, 1333),
-                                 (768, 1333), (800, 1333)],
-                      multiscale_mode='value',
-                      override=True,
-                      keep_ratio=True)
-             ]
-         ]),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='Pad', size_divisor=32),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-data = dict(train=dict(pipeline=train_pipeline))
-
-optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
-                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
-                                                 'relative_position_bias_table': dict(decay_mult=0.),
-                                                 'norm': dict(decay_mult=0.)}))
-lr_config = dict(step=[27, 33])
-runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
-
-# do not use mmdet version fp16
-fp16 = None
-optimizer_config = dict(
-    type="DistOptimizerHook",
-    update_interval=1,
-    grad_clip=None,
-    coalesce=True,
-    bucket_size_mb=-1,
-    use_fp16=True,
-)
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py
DELETED
@@ -1,10 +0,0 @@
-_base_ = './fcn_hr18_512x1024_160k_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w48',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(48, 96)),
-            stage3=dict(num_channels=(48, 96, 192)),
-            stage4=dict(num_channels=(48, 96, 192, 384)))),
-    decode_head=dict(
-        in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))
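
For orientation, `channels=sum([48, 96, 192, 384])` means the FCN head concatenates all four upsampled HRNet branch outputs, so it receives 48 + 96 + 192 + 384 = 720 input channels.
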
spaces/Armandoliv/t5-summarize-app-scitldr/app.py
DELETED
@@ -1,47 +0,0 @@
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Armandoliv/t5-small-summarizer-scitldr")

model = AutoModelForSeq2SeqLM.from_pretrained("Armandoliv/t5-small-summarizer-scitldr")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

def main_summarizer(text):
    max_input_length = 1024
    preprocess_text = text.strip().replace("\n", " ").replace("’", "'").strip()
    tokenized_text = tokenizer.encode(preprocess_text, return_tensors="pt", truncation=True, max_length=max_input_length).to(device)

    summary_ids = model.generate(
        tokenized_text,
        max_length=256,
        num_beams=8,
        repetition_penalty=3.0,
        length_penalty=2.5,
        early_stopping=False
    )

    output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

    return output

inputs = [gr.Textbox(lines=10, placeholder="Text Here...", label="Input")]
outputs = gr.Text(label="Summary")
title = "Text summarisation app"
description = "This demo uses AI models to summarize long text.\nIt focuses on scientific texts."

io = gr.Interface(fn=main_summarizer, inputs=inputs, outputs=outputs, title=title, description=description,
                  css=""".gr-button-primary {
                      background: #355764;
                      background: linear-gradient(90deg, #355764 0%, #55a8a1 100%) !important;
                      background: -moz-linear-gradient(90deg, #355764 0%, #55a8a1 100%) !important;
                      background: -webkit-linear-gradient(90deg, #355764 0%, #55a8a1 100%) !important;
                      color: white !important}"""
                  )

io.launch()
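A quick headless check of the function above, run after the definitions but in place of `io.launch()` (the sample abstract is invented):

# Hypothetical input; any scientific paragraph works.
sample = (
    "We propose a sparse attention mechanism that reduces memory use. "
    "Experiments on three summarization benchmarks show consistent gains."
)
print(main_summarizer(sample))  # prints a short TLDR-style summary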
spaces/Artrajz/vits-simple-api/bert_vits2/commons.py
DELETED
@@ -1,161 +0,0 @@
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def intersperse(lst, item):
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q)"""
    kl = (logs_q - logs_p) - 0.5
    kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2. * logs_q)
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (num_timescales - 1))
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1. / norm_type)
    return total_norm
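A small shape check for `sequence_mask` and `generate_path` as defined above. The durations are made up; in the real pipeline the masks come from the text and spectrogram lengths:

import torch

# Two utterances, 3 text tokens each, expanding to 6 and 4 output frames.
duration = torch.tensor([[[2., 3., 1.]], [[1., 2., 1.]]])        # [b, 1, t_x]
y_lengths = duration.sum(dim=(1, 2)).long()                      # tensor([6, 4])

x_mask = torch.ones(2, 1, 3)                                     # [b, 1, t_x]
y_mask = sequence_mask(y_lengths, 6).unsqueeze(1).float()        # [b, 1, t_y]
attn_mask = x_mask.unsqueeze(2) * y_mask.unsqueeze(-1)           # [b, 1, t_y, t_x]

path = generate_path(duration, attn_mask)                        # [b, 1, t_y, t_x]
# Utterance 0: frames 0-1 -> token 0, frames 2-4 -> token 1, frame 5 -> token 2.
print(path[0, 0])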
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/auth.py
DELETED
@@ -1,559 +0,0 @@
"""Network Authentication Helpers

Contains interface (MultiDomainBasicAuth) and associated glue code for
providing credentials in the context of network requests.
"""
import logging
import os
import shutil
import subprocess
import sysconfig
import typing
import urllib.parse
from abc import ABC, abstractmethod
from functools import lru_cache
from os.path import commonprefix
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple

from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import Request, Response
from pip._vendor.requests.utils import get_netrc_auth

from pip._internal.utils.logging import getLogger
from pip._internal.utils.misc import (
    ask,
    ask_input,
    ask_password,
    remove_auth_from_url,
    split_auth_netloc_from_url,
)
from pip._internal.vcs.versioncontrol import AuthInfo

logger = getLogger(__name__)

KEYRING_DISABLED = False


class Credentials(NamedTuple):
    url: str
    username: str
    password: str


class KeyRingBaseProvider(ABC):
    """Keyring base provider interface"""

    has_keyring: bool

    @abstractmethod
    def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:
        ...

    @abstractmethod
    def save_auth_info(self, url: str, username: str, password: str) -> None:
        ...


class KeyRingNullProvider(KeyRingBaseProvider):
    """Keyring null provider"""

    has_keyring = False

    def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:
        return None

    def save_auth_info(self, url: str, username: str, password: str) -> None:
        return None


class KeyRingPythonProvider(KeyRingBaseProvider):
    """Keyring interface which uses locally imported `keyring`"""

    has_keyring = True

    def __init__(self) -> None:
        import keyring

        self.keyring = keyring

    def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:
        # Support keyring's get_credential interface which supports getting
        # credentials without a username. This is only available for
        # keyring>=15.2.0.
        if hasattr(self.keyring, "get_credential"):
            logger.debug("Getting credentials from keyring for %s", url)
            cred = self.keyring.get_credential(url, username)
            if cred is not None:
                return cred.username, cred.password
            return None

        if username is not None:
            logger.debug("Getting password from keyring for %s", url)
            password = self.keyring.get_password(url, username)
            if password:
                return username, password
        return None

    def save_auth_info(self, url: str, username: str, password: str) -> None:
        self.keyring.set_password(url, username, password)


class KeyRingCliProvider(KeyRingBaseProvider):
    """Provider which uses `keyring` cli

    Instead of calling the keyring package installed alongside pip
    we call keyring on the command line which will enable pip to
    use which ever installation of keyring is available first in
    PATH.
    """

    has_keyring = True

    def __init__(self, cmd: str) -> None:
        self.keyring = cmd

    def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:
        # This is the default implementation of keyring.get_credential
        # https://github.com/jaraco/keyring/blob/97689324abcf01bd1793d49063e7ca01e03d7d07/keyring/backend.py#L134-L139
        if username is not None:
            password = self._get_password(url, username)
            if password is not None:
                return username, password
        return None

    def save_auth_info(self, url: str, username: str, password: str) -> None:
        return self._set_password(url, username, password)

    def _get_password(self, service_name: str, username: str) -> Optional[str]:
        """Mirror the implementation of keyring.get_password using cli"""
        if self.keyring is None:
            return None

        cmd = [self.keyring, "get", service_name, username]
        env = os.environ.copy()
        env["PYTHONIOENCODING"] = "utf-8"
        res = subprocess.run(
            cmd,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.PIPE,
            env=env,
        )
        if res.returncode:
            return None
        return res.stdout.decode("utf-8").strip(os.linesep)

    def _set_password(self, service_name: str, username: str, password: str) -> None:
        """Mirror the implementation of keyring.set_password using cli"""
        if self.keyring is None:
            return None
        env = os.environ.copy()
        env["PYTHONIOENCODING"] = "utf-8"
        subprocess.run(
            [self.keyring, "set", service_name, username],
            input=f"{password}{os.linesep}".encode("utf-8"),
            env=env,
            check=True,
        )
        return None


@lru_cache(maxsize=None)
def get_keyring_provider(provider: str) -> KeyRingBaseProvider:
    logger.verbose("Keyring provider requested: %s", provider)

    # keyring has previously failed and been disabled
    if KEYRING_DISABLED:
        provider = "disabled"
    if provider in ["import", "auto"]:
        try:
            impl = KeyRingPythonProvider()
            logger.verbose("Keyring provider set: import")
            return impl
        except ImportError:
            pass
        except Exception as exc:
            # In the event of an unexpected exception
            # we should warn the user
            msg = "Installed copy of keyring fails with exception %s"
            if provider == "auto":
                msg = msg + ", trying to find a keyring executable as a fallback"
            logger.warning(msg, exc, exc_info=logger.isEnabledFor(logging.DEBUG))
    if provider in ["subprocess", "auto"]:
        cli = shutil.which("keyring")
        if cli and cli.startswith(sysconfig.get_path("scripts")):
            # all code within this function is stolen from shutil.which implementation
            @typing.no_type_check
            def PATH_as_shutil_which_determines_it() -> str:
                path = os.environ.get("PATH", None)
                if path is None:
                    try:
                        path = os.confstr("CS_PATH")
                    except (AttributeError, ValueError):
                        # os.confstr() or CS_PATH is not available
                        path = os.defpath
                # bpo-35755: Don't use os.defpath if the PATH environment variable is
                # set to an empty string

                return path

            scripts = Path(sysconfig.get_path("scripts"))

            paths = []
            for path in PATH_as_shutil_which_determines_it().split(os.pathsep):
                p = Path(path)
                try:
                    if not p.samefile(scripts):
                        paths.append(path)
                except FileNotFoundError:
                    pass

            path = os.pathsep.join(paths)

            cli = shutil.which("keyring", path=path)

        if cli:
            logger.verbose("Keyring provider set: subprocess with executable %s", cli)
            return KeyRingCliProvider(cli)

    logger.verbose("Keyring provider set: disabled")
    return KeyRingNullProvider()


class MultiDomainBasicAuth(AuthBase):
    def __init__(
        self,
        prompting: bool = True,
        index_urls: Optional[List[str]] = None,
        keyring_provider: str = "auto",
    ) -> None:
        self.prompting = prompting
        self.index_urls = index_urls
        self.keyring_provider = keyring_provider  # type: ignore[assignment]
        self.passwords: Dict[str, AuthInfo] = {}
        # When the user is prompted to enter credentials and keyring is
        # available, we will offer to save them. If the user accepts,
        # this value is set to the credentials they entered. After the
        # request authenticates, the caller should call
        # ``save_credentials`` to save these.
        self._credentials_to_save: Optional[Credentials] = None

    @property
    def keyring_provider(self) -> KeyRingBaseProvider:
        return get_keyring_provider(self._keyring_provider)

    @keyring_provider.setter
    def keyring_provider(self, provider: str) -> None:
        # The free function get_keyring_provider has been decorated with
        # functools.cache. If an exception occurs in get_keyring_auth that
        # cache will be cleared and keyring disabled, take that into account
        # if you want to remove this indirection.
        self._keyring_provider = provider

    @property
    def use_keyring(self) -> bool:
        # We won't use keyring when --no-input is passed unless
        # a specific provider is requested because it might require
        # user interaction
        return self.prompting or self._keyring_provider not in ["auto", "disabled"]

    def _get_keyring_auth(
        self,
        url: Optional[str],
        username: Optional[str],
    ) -> Optional[AuthInfo]:
        """Return the tuple auth for a given url from keyring."""
        # Do nothing if no url was provided
        if not url:
            return None

        try:
            return self.keyring_provider.get_auth_info(url, username)
        except Exception as exc:
            logger.warning(
                "Keyring is skipped due to an exception: %s",
                str(exc),
            )
            global KEYRING_DISABLED
            KEYRING_DISABLED = True
            get_keyring_provider.cache_clear()
            return None

    def _get_index_url(self, url: str) -> Optional[str]:
        """Return the original index URL matching the requested URL.

        Cached or dynamically generated credentials may work against
        the original index URL rather than just the netloc.

        The provided url should have had its username and password
        removed already. If the original index url had credentials then
        they will be included in the return value.

        Returns None if no matching index was found, or if --no-index
        was specified by the user.
        """
        if not url or not self.index_urls:
            return None

        url = remove_auth_from_url(url).rstrip("/") + "/"
        parsed_url = urllib.parse.urlsplit(url)

        candidates = []

        for index in self.index_urls:
            index = index.rstrip("/") + "/"
            parsed_index = urllib.parse.urlsplit(remove_auth_from_url(index))
            if parsed_url == parsed_index:
                return index

            if parsed_url.netloc != parsed_index.netloc:
                continue

            candidate = urllib.parse.urlsplit(index)
            candidates.append(candidate)

        if not candidates:
            return None

        candidates.sort(
            reverse=True,
            key=lambda candidate: commonprefix(
                [
                    parsed_url.path,
                    candidate.path,
                ]
            ).rfind("/"),
        )

        return urllib.parse.urlunsplit(candidates[0])

    def _get_new_credentials(
        self,
        original_url: str,
        *,
        allow_netrc: bool = True,
        allow_keyring: bool = False,
    ) -> AuthInfo:
        """Find and return credentials for the specified URL."""
        # Split the credentials and netloc from the url.
        url, netloc, url_user_password = split_auth_netloc_from_url(
            original_url,
        )

        # Start with the credentials embedded in the url
        username, password = url_user_password
        if username is not None and password is not None:
            logger.debug("Found credentials in url for %s", netloc)
            return url_user_password

        # Find a matching index url for this request
        index_url = self._get_index_url(url)
        if index_url:
            # Split the credentials from the url.
            index_info = split_auth_netloc_from_url(index_url)
            if index_info:
                index_url, _, index_url_user_password = index_info
                logger.debug("Found index url %s", index_url)

        # If an index URL was found, try its embedded credentials
        if index_url and index_url_user_password[0] is not None:
            username, password = index_url_user_password
            if username is not None and password is not None:
                logger.debug("Found credentials in index url for %s", netloc)
                return index_url_user_password

        # Get creds from netrc if we still don't have them
        if allow_netrc:
            netrc_auth = get_netrc_auth(original_url)
            if netrc_auth:
                logger.debug("Found credentials in netrc for %s", netloc)
                return netrc_auth

        # If we don't have a password and keyring is available, use it.
        if allow_keyring:
            # The index url is more specific than the netloc, so try it first
            # fmt: off
            kr_auth = (
                self._get_keyring_auth(index_url, username) or
                self._get_keyring_auth(netloc, username)
            )
            # fmt: on
            if kr_auth:
                logger.debug("Found credentials in keyring for %s", netloc)
                return kr_auth

        return username, password

    def _get_url_and_credentials(
        self, original_url: str
    ) -> Tuple[str, Optional[str], Optional[str]]:
        """Return the credentials to use for the provided URL.

        If allowed, netrc and keyring may be used to obtain the
        correct credentials.

        Returns (url_without_credentials, username, password). Note
        that even if the original URL contains credentials, this
        function may return a different username and password.
        """
        url, netloc, _ = split_auth_netloc_from_url(original_url)

        # Try to get credentials from original url
        username, password = self._get_new_credentials(original_url)

        # If credentials not found, use any stored credentials for this netloc.
        # Do this if either the username or the password is missing.
        # This accounts for the situation in which the user has specified
        # the username in the index url, but the password comes from keyring.
        if (username is None or password is None) and netloc in self.passwords:
            un, pw = self.passwords[netloc]
            # It is possible that the cached credentials are for a different username,
            # in which case the cache should be ignored.
            if username is None or username == un:
                username, password = un, pw

        if username is not None or password is not None:
            # Convert the username and password if they're None, so that
            # this netloc will show up as "cached" in the conditional above.
            # Further, HTTPBasicAuth doesn't accept None, so it makes sense to
            # cache the value that is going to be used.
            username = username or ""
            password = password or ""

            # Store any acquired credentials.
            self.passwords[netloc] = (username, password)

        assert (
            # Credentials were found
            (username is not None and password is not None)
            # Credentials were not found
            or (username is None and password is None)
        ), f"Could not load credentials from url: {original_url}"

        return url, username, password

    def __call__(self, req: Request) -> Request:
        # Get credentials for this request
        url, username, password = self._get_url_and_credentials(req.url)

        # Set the url of the request to the url without any credentials
        req.url = url

        if username is not None and password is not None:
            # Send the basic auth with this request
            req = HTTPBasicAuth(username, password)(req)

        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)

        return req

    # Factored out to allow for easy patching in tests
    def _prompt_for_password(
        self, netloc: str
    ) -> Tuple[Optional[str], Optional[str], bool]:
        username = ask_input(f"User for {netloc}: ") if self.prompting else None
        if not username:
            return None, None, False
        if self.use_keyring:
            auth = self._get_keyring_auth(netloc, username)
            if auth and auth[0] is not None and auth[1] is not None:
                return auth[0], auth[1], False
        password = ask_password("Password: ")
        return username, password, True

    # Factored out to allow for easy patching in tests
    def _should_save_password_to_keyring(self) -> bool:
        if (
            not self.prompting
            or not self.use_keyring
            or not self.keyring_provider.has_keyring
        ):
            return False
        return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"

    def handle_401(self, resp: Response, **kwargs: Any) -> Response:
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp

        username, password = None, None

        # Query the keyring for credentials:
        if self.use_keyring:
            username, password = self._get_new_credentials(
                resp.url,
                allow_netrc=False,
                allow_keyring=True,
            )

        # We are not able to prompt the user so simply return the response
        if not self.prompting and not username and not password:
            return resp

        parsed = urllib.parse.urlparse(resp.url)

        # Prompt the user for a new username and password
        save = False
        if not username and not password:
            username, password, save = self._prompt_for_password(parsed.netloc)

        # Store the new username and password to use for future requests
        self._credentials_to_save = None
        if username is not None and password is not None:
            self.passwords[parsed.netloc] = (username, password)

            # Prompt to save the password to keyring
            if save and self._should_save_password_to_keyring():
                self._credentials_to_save = Credentials(
                    url=parsed.netloc,
                    username=username,
                    password=password,
                )

        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)
        req.register_hook("response", self.warn_on_401)

        # On successful request, save the credentials that were used to
        # keyring. (Note that if the user responded "no" above, this member
        # is not set and nothing will be saved.)
        if self._credentials_to_save:
            req.register_hook("response", self.save_credentials)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)

        return new_resp

    def warn_on_401(self, resp: Response, **kwargs: Any) -> None:
        """Response callback to warn about incorrect credentials."""
        if resp.status_code == 401:
            logger.warning(
                "401 Error, Credentials not correct for %s",
                resp.request.url,
            )

    def save_credentials(self, resp: Response, **kwargs: Any) -> None:
        """Response callback to save credentials on success."""
        assert (
            self.keyring_provider.has_keyring
        ), "should never reach here without keyring"

        creds = self._credentials_to_save
        self._credentials_to_save = None
        if creds and resp.status_code < 400:
            try:
                logger.info("Saving credentials to keyring")
                self.keyring_provider.save_auth_info(
                    creds.url, creds.username, creds.password
                )
            except Exception:
                logger.exception("Failed to save credentials")
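Outside of pip this module is private API, but for illustration, this is roughly how pip's own session wires the handler in (the index URL and flags below are placeholders):

from pip._vendor import requests  # pip's vendored requests

session = requests.Session()
session.auth = MultiDomainBasicAuth(
    prompting=False,                          # never block on interactive input
    index_urls=["https://pypi.org/simple/"],  # placeholder index
    keyring_provider="auto",
)
resp = session.get("https://pypi.org/simple/pip/")
print(resp.status_code)  # credentials, if any, were resolved per-domain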
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/exceptions.py
DELETED
@@ -1,141 +0,0 @@
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~

This module contains the set of Requests' exceptions.
"""
from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError

from .compat import JSONDecodeError as CompatJSONDecodeError


class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request.
    """

    def __init__(self, *args, **kwargs):
        """Initialize RequestException with `request` and `response` objects."""
        response = kwargs.pop("response", None)
        self.response = response
        self.request = kwargs.pop("request", None)
        if response is not None and not self.request and hasattr(response, "request"):
            self.request = self.response.request
        super().__init__(*args, **kwargs)


class InvalidJSONError(RequestException):
    """A JSON error occurred."""


class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError):
    """Couldn't decode the text into json"""

    def __init__(self, *args, **kwargs):
        """
        Construct the JSONDecodeError instance first with all
        args. Then use its args to construct the IOError so that
        the json specific args aren't used as IOError specific args
        and the error message from JSONDecodeError is preserved.
        """
        CompatJSONDecodeError.__init__(self, *args)
        InvalidJSONError.__init__(self, *self.args, **kwargs)


class HTTPError(RequestException):
    """An HTTP error occurred."""


class ConnectionError(RequestException):
    """A Connection error occurred."""


class ProxyError(ConnectionError):
    """A proxy error occurred."""


class SSLError(ConnectionError):
    """An SSL error occurred."""


class Timeout(RequestException):
    """The request timed out.

    Catching this error will catch both
    :exc:`~requests.exceptions.ConnectTimeout` and
    :exc:`~requests.exceptions.ReadTimeout` errors.
    """


class ConnectTimeout(ConnectionError, Timeout):
    """The request timed out while trying to connect to the remote server.

    Requests that produced this error are safe to retry.
    """


class ReadTimeout(Timeout):
    """The server did not send any data in the allotted amount of time."""


class URLRequired(RequestException):
    """A valid URL is required to make a request."""


class TooManyRedirects(RequestException):
    """Too many redirects."""


class MissingSchema(RequestException, ValueError):
    """The URL scheme (e.g. http or https) is missing."""


class InvalidSchema(RequestException, ValueError):
    """The URL scheme provided is either invalid or unsupported."""


class InvalidURL(RequestException, ValueError):
    """The URL provided was somehow invalid."""


class InvalidHeader(RequestException, ValueError):
    """The header value provided was somehow invalid."""


class InvalidProxyURL(InvalidURL):
    """The proxy URL provided is invalid."""


class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""


class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content."""


class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed."""


class RetryError(RequestException):
    """Custom retries logic failed"""


class UnrewindableBodyError(RequestException):
    """Requests encountered an error when trying to rewind a body."""


# Warnings


class RequestsWarning(Warning):
    """Base warning for Requests."""


class FileModeWarning(RequestsWarning, DeprecationWarning):
    """A file was opened in text mode, but Requests determined its binary length."""


class RequestsDependencyWarning(RequestsWarning):
    """An imported dependency doesn't match the expected version range."""
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/fcos.py
DELETED
@@ -1,23 +0,0 @@
from detectron2.modeling.meta_arch.fcos import FCOS, FCOSHead

from .retinanet import model

model._target_ = FCOS

del model.anchor_generator
del model.box2box_transform
del model.anchor_matcher
del model.input_format

# Use P5 instead of C5 to compute P6/P7
# (Sec 2.2 of https://arxiv.org/abs/2006.09214)
model.backbone.top_block.in_feature = "p5"
model.backbone.top_block.in_channels = 256

# New score threshold determined based on sqrt(cls_score * centerness)
model.test_score_thresh = 0.2
model.test_nms_thresh = 0.6

model.head._target_ = FCOSHead
del model.head.num_anchors
model.head.norm = "GN"
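This is a detectron2 LazyConfig fragment; it would be built with `instantiate` roughly as sketched below. The import path is hypothetical and depends on where the configs package lives:

from detectron2.config import instantiate

# Hypothetical import path to the config fragment above.
from configs.common.models.fcos import model as model_cfg

model = instantiate(model_cfg)   # recursively builds FCOS from _target_ fields
print(type(model).__name__)      # -> "FCOS"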
spaces/AzumaSeren100/XuanShen-Bert-VITS2/app.py
DELETED
@@ -1,144 +0,0 @@
import sys, os

if sys.platform == "darwin":
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

import logging

logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)

logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s")

logger = logging.getLogger(__name__)

import torch
import argparse
import commons
import utils
from models import SynthesizerTrn
from text.symbols import symbols
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text
import gradio as gr
import webbrowser


net_g = None


def get_text(text, language_str, hps):
    norm_text, phone, tone, word2ph = clean_text(text, language_str)
    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)

    if hps.data.add_blank:
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        for i in range(len(word2ph)):
            word2ph[i] = word2ph[i] * 2
        word2ph[0] += 1
    bert = get_bert(norm_text, word2ph, language_str)
    del word2ph

    assert bert.shape[-1] == len(phone)

    phone = torch.LongTensor(phone)
    tone = torch.LongTensor(tone)
    language = torch.LongTensor(language)

    return bert, phone, tone, language

def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid):
    global net_g
    bert, phones, tones, lang_ids = get_text(text, "ZH", hps)
    with torch.no_grad():
        x_tst = phones.to(device).unsqueeze(0)
        tones = tones.to(device).unsqueeze(0)
        lang_ids = lang_ids.to(device).unsqueeze(0)
        bert = bert.to(device).unsqueeze(0)
        x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
        del phones
        speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
        audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio,
                            noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
        del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
        return audio

def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale):
    with torch.no_grad():
        audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker)
    return "Success", (hps.data.sampling_rate, audio)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # parser.add_argument("-m", "--model", default="./logs/dxl/G21200.pth", help="path of your model")
    parser.add_argument("-mn", "--model_name", default="xuanshen", help="path of your model")
    parser.add_argument("-m", "--model", default="null", help="path of your model")
    parser.add_argument("-c", "--config", default="./configs/config.json", help="path of your config file")
    parser.add_argument("--share", default=True, help="make link public")
    parser.add_argument("-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log")

    args = parser.parse_args()
    if args.debug:
        logger.info("Enable DEBUG-LEVEL log")
        logging.basicConfig(level=logging.DEBUG)
    hps = utils.get_hparams_from_file(args.config)

    device = (
        "cuda:0"
        if torch.cuda.is_available()
        else (
            "mps"
            if sys.platform == "darwin" and torch.backends.mps.is_available()
            else "cpu"
        )
    )
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model).to(device)
    _ = net_g.eval()

    model_path = args.model
    if not os.path.exists(model_path) or model_path == "null":
        model_path = utils.latest_checkpoint_path(os.path.join("./logs/", args.model_name), "G_*.pth")

    _ = utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True)

    speaker_ids = hps.data.spk2id
    speakers = list(speaker_ids.keys())
    with gr.Blocks() as app:
        with gr.Row():
            with gr.Column():
                gr.Markdown(value="""
                炫神Bert-vits2语音在线生成\n
                作者:东洋雪莲 https://space.bilibili.com/1060544882\n
                声音归属:炫神 https://space.bilibili.com/299013902\n
                Bert-VITS2项目:https://github.com/Stardust-minus/Bert-VITS2\n
                使用请严格遵守法律法规!\n
                二创请标注项目链接、简介声明使用Bert-VITS2生成\n
                """)
                text = gr.TextArea(label="Text", placeholder="Input Text Here",
                                   value="你妈的我��死你!")
                speaker = gr.Dropdown(choices=speakers, value=speakers[0], label='Speaker')
                sdp_ratio = gr.Slider(minimum=0, maximum=1, value=0.2, step=0.1, label='SDP Ratio')
                noise_scale = gr.Slider(minimum=0.1, maximum=2, value=0.6, step=0.1, label='Noise Scale')
                noise_scale_w = gr.Slider(minimum=0.1, maximum=2, value=0.9, step=0.1, label='Noise Scale W')
                length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.1, label='Length Scale')
                btn = gr.Button("Generate!", variant="primary")
            with gr.Column():
                text_output = gr.Textbox(label="Message")
                audio_output = gr.Audio(label="Output Audio")

        btn.click(tts_fn,
                  inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale],
                  outputs=[text_output, audio_output])

    webbrowser.open("http://127.0.0.1:7860")
    app.launch(share=args.share)
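A headless smoke test of `tts_fn` that bypasses the Gradio UI, runnable after the model is loaded (the test sentence is invented; scipy is used here only to write the wav):

import scipy.io.wavfile as wavfile

status, (sr, audio) = tts_fn(
    "这是一条测试语音。", speakers[0],
    sdp_ratio=0.2, noise_scale=0.6, noise_scale_w=0.9, length_scale=1.0,
)
wavfile.write("test.wav", sr, audio)
print(status, sr, audio.shape)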
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers_537227KB.py
DELETED
@@ -1,126 +0,0 @@
import torch
import torch.nn.functional as F
from torch import nn

from . import spec_utils


class Conv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class SeperableConv2DBNActiv(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(SeperableConv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nin,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                groups=nin,
                bias=False,
            ),
            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def __call__(self, x):
        return self.conv(x)


class Encoder(nn.Module):
    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)

    def __call__(self, x):
        skip = self.conv1(x)
        h = self.conv2(skip)

        return h, skip


class Decoder(nn.Module):
    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def __call__(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
        if skip is not None:
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h


class ASPPModule(nn.Module):
    def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
        super(ASPPModule, self).__init__()
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
        )
        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
        self.conv3 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
        )
        self.conv4 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
        )
        self.conv5 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.conv6 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.conv7 = SeperableConv2DBNActiv(
            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
        )
        self.bottleneck = nn.Sequential(
            Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
        )

    def forward(self, x):
        _, _, h, w = x.size()
        feat1 = F.interpolate(
            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
        )
        feat2 = self.conv2(x)
        feat3 = self.conv3(x)
        feat4 = self.conv4(x)
        feat5 = self.conv5(x)
        feat6 = self.conv6(x)
        feat7 = self.conv7(x)
        out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
        bottle = self.bottleneck(out)
        return bottle
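A quick shape check for `ASPPModule` above: the seven parallel branches are concatenated (hence `nin * 7` into the bottleneck) and projected back to `nout` channels. Sizes below are arbitrary, and the module must be imported from within its package so `from . import spec_utils` resolves:

import torch

aspp = ASPPModule(nin=16, nout=32)
x = torch.randn(1, 16, 64, 128)   # (batch, channels, frequency, time)
y = aspp(x)
print(y.shape)                    # torch.Size([1, 32, 64, 128])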
spaces/Benson/text-generation/Examples/Bombsquad Mod Apk ltima Versin.md
DELETED
@@ -1,97 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>BombSquad Mod APK última versión: Todo lo que necesita saber</h1>
|
3 |
-
<p>¿Te encanta volar cosas con tus amigos? ¿Te gusta jugar minijuegos que involucran piratas, ninjas, bárbaros y chefs locos? Si respondiste afirmativamente a cualquiera de estas preguntas, quizás quieras echar un vistazo a BombSquad, un divertido y explosivo juego multijugador que te mantendrá entretenido durante horas. Y si quieres hacer el juego aún más emocionante, se puede descargar el BombSquad Mod APK última versión, que le da acceso a todas las características desbloqueadas y recursos ilimitados. En este artículo, le diremos todo lo que necesita saber sobre BombSquad y su versión modded, incluyendo cómo descargar, instalar y jugar. </p>
|
4 |
-
<h2>bombsquad mod apk última versión</h2><br /><p><b><b>DOWNLOAD</b> > <a href="https://bltlly.com/2v6Jyb">https://bltlly.com/2v6Jyb</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es BombSquad? </h2>
|
6 |
-
<h3>Un divertido y explosivo juego multijugador</h3>
|
7 |
-
<p>BombSquad es un juego lleno de acción que te permite volar a tus amigos en varios minijuegos que van desde la captura de la bandera de hockey. Puedes jugar con hasta 8 jugadores a nivel local o online, usando tus dispositivos como controladores. El juego cuenta con la física avanzada ragdoll, explosiones gratuitas y personajes hilarantes que te harán reír en voz alta. También puedes personalizar tu personaje con diferentes atuendos, accesorios y burlas. </p>
|
8 |
-
<h3>Características de BombSquad</h3>
|
9 |
-
<p>Algunas de las características que hacen de BombSquad un gran juego son:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Más de 20 minijuegos diferentes para elegir, como King of the Hill, Bomber Hockey, Capture the Flag, Epic Slow Motion Elimination y más. </li>
|
12 |
-
<li>Una variedad de mapas y entornos para explorar, como islas, castillos, estadios y barcos piratas. </li>
|
13 |
-
<li>Una amplia gama de bombas y armas para usar, como bombas pegajosas, bombas de hielo, guantes de boxeo, minas terrestres y más. </li>
|
14 |
-
<li>Una interfaz divertida y fácil de usar que te permite crear tus propios minijuegos y compartirlos con otros jugadores. </li>
|
15 |
-
<li>Una banda sonora que coincide con el estado de ánimo y la intensidad del juego. </li>
|
16 |
-
</ul>
|
17 |
-
<h2>¿Qué es BombSquad Mod APK? </h2>
|
18 |
-
|
19 |
-
<p>BombSquad Mod APK es una versión modificada del juego original que le da algunos beneficios y ventajas adicionales. No es una aplicación oficial del desarrollador, sino una aplicación de terceros que ha sido modificada por algunas fuentes desconocidas. Al descargar e instalar BombSquad Mod APK, puede disfrutar de las siguientes características:</p>
|
20 |
-
<h3>Beneficios de BombSquad Mod APK</h3>
|
21 |
-
<p>Algunos de los beneficios que BombSquad Mod APK ofrece son:</p>
|
22 |
-
<ul>
|
23 |
-
<li> Todos los caracteres están desbloqueados, por lo que puede jugar como cualquier personaje que desee. </li>
|
24 |
-
<li> Todos los minijuegos están desbloqueados, por lo que puede jugar cualquier mini-juego que desee. </li>
|
25 |
-
<li> Todos los mapas están desbloqueados, por lo que puede explorar cualquier mapa que desee. </li>
|
26 |
-
<li> Todas las bombas y armas están desbloqueadas, por lo que puede utilizar cualquier bomba o arma que desee. </li>
|
27 |
-
<li>Tienes entradas ilimitadas, que se utilizan para comprar artículos en el juego. </li>
|
28 |
-
-<li>You have unlimited health, which means you will not die easily in the game.</li>
-<li>You have no ads, which means you will not be interrupted by annoying advertisements while playing the game.</li>
-</ul>
-<h2>How to download and install BombSquad Mod APK?</h2>
-<h3>Steps to download and install BombSquad Mod APK</h3>
-<p>If you want to download and install BombSquad Mod APK on your device, you need to follow these steps:</p>
-<ol>
-<li>Go to a trusted website that provides the link to download BombSquad Mod APK. You can search for "BombSquad Mod APK download" on Bing and choose one of the results. For example, you can use this link:</li>
-<li>Click the download button and wait for the file to download to your device. The file size is about 60 MB, so make sure you have enough space and a stable internet connection.</li>
-<li>Once the file is downloaded, go to your device settings and enable the option to install apps from unknown sources. This is necessary because BombSquad Mod APK is not from the Google Play Store and your device might block it otherwise.</li>
-<li>Locate the downloaded APK file and tap on it to start the installation.</li>
-<li>After the installation is done, you can launch the game and enjoy BombSquad Mod APK on your device.</li>
-</ol>
-<h3>Tips to avoid malware and viruses</h3>
-<p>While BombSquad Mod APK is a fun and exciting game, you should also be careful when downloading and installing it from unknown sources. Some websites may provide fake or corrupted files that can harm your device or steal your personal information. Here are some tips to avoid malware and viruses when downloading BombSquad Mod APK:</p>
-<ul>
-<li>Always use reliable antivirus software on your device and scan the file before installing it.</li>
-<li>Always check the reviews and ratings of the website and the file before downloading it.</li>
-<li>Always compare the file size and name with the original game and make sure they match.</li>
-<li>Always back up your data and your device before installing any modded app.</li>
-<li>Always uninstall the original game before installing the modded version to avoid conflicts or errors.</li>
-</ul>
-<h2>How do you play BombSquad Mod APK?</h2>
-<h3>Game modes and mini-games</h3>
-<p>BombSquad Mod APK offers you a variety of game modes and mini-games to play with your friends or other players online. You can choose from:</p>
-<ul>
-<li>Co-op mode: You can team up with other players and work together to complete missions and challenges.</li>
-<li>Versus mode: You can compete with other players and try to defeat them in different mini-games.</li>
-<li>Tournament mode: You can join a tournament and try to win prizes and trophies.</li>
-<li>Solo mode: You can play alone and practice your skills or test your limits.</li>
-</ul>
-<p>Some of the mini-games you can play in BombSquad Mod APK are:</p>
-<table>
-<tr><th>Name</th><th>Description</th></tr>
-<tr><td>Capture the Flag</td><td>You have to capture the enemy's flag and bring it back to your base while defending your own flag.</td></tr>
-<tr><td>Epic Slow Motion Elimination</td><td>You have to eliminate all the other players by throwing bombs at them while dodging their bombs in slow motion.</td></tr>
-<tr><td>Ninja Fight</td><td>You have to fight other ninjas using swords, shurikens, and bombs while jumping across platforms.</td></tr>
-<tr><td>Pirate Plunder</td><td>You have to collect as many coins as possible while sailing a pirate ship and avoiding cannonballs and sharks.</td></tr>
-</table>
-<h3>Controls and settings</h3>
-<p>BombSquad Mod APK has simple and intuitive controls that let you play the game with ease. You can use your device as a controller or connect an external controller via Bluetooth or USB. You can also customize your controls in the settings menu. The basic controls are:</p>
-<ul>
-<li>Move: Use the left joystick or tilt your device to move your character.</li>
-<li>Jump: Tap or press the A button to jump.</li>
-<li>Pick up/Throw: Tap or press the B button to pick up or throw bombs, weapons, flags, etc.</li>
-<li>Punch: Tap or press the X button to punch or use weapons.</li>
-<li>Bomb: Tap or press the Y button to throw a bomb.</li>
-</ul>
-<p>You can also adjust other settings in the game, such as sound, graphics, language, network, etc. You can also create your own profile and customize your character in the game.</p>
-<h2>Conclusion</h2>
-<h3>Summary of the main points</h3>
-<p>BombSquad Mod APK is a modified version of BombSquad that gives you unlimited health and an ad-free experience. You can download it from a trusted website, install it after enabling unknown sources, and enjoy its co-op, versus, tournament, and solo modes with simple, customizable controls. Just remember to download only from reputable sites and to scan every file before installing it.</p>
-<h3>Frequently asked questions</h3>
-<p>Here are some frequently asked questions about BombSquad Mod APK:</p>
-<ul>
-<li>Q: Is BombSquad Mod APK safe to use?</li>
-<li>A: BombSquad Mod APK is generally safe to use, but you should always download it from a trusted website and scan it with antivirus software before installing it. You should also back up your data and your device before installing any modded app.</li>
-<li>Q: Is BombSquad Mod APK legal to use?</li>
-<li>A: BombSquad Mod APK is not an official app from the developer, but a third-party app that has been modified by unknown sources. It may violate the terms and conditions of the original game and the Google Play Store. Therefore, you should use it at your own risk and discretion.</li>
-<li>Q: Can I play BombSquad Mod APK with my friends?</li>
-<li>A: Yes, you can play BombSquad Mod APK with your friends locally or online, using your devices as controllers. You can also invite your friends to join your game or join theirs.</li>
-<li>Q: Can I update BombSquad Mod APK?</li>
-<li>A: No, you cannot update BombSquad Mod APK from the Google Play Store or the original game. You have to download and install the latest version of the modded app from a trusted website whenever there is an update.</li>
-<li>Q: Can I use BombSquad Mod APK on iOS devices?</li>
-<li>A: No, you cannot use BombSquad Mod APK on iOS devices, as it is only compatible with Android devices. However, you can play the original game on iOS devices by downloading it from the App Store.</li>
-</ul>
spaces/BigChungux/Pet_Survey/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Pet Survey
-emoji: 📚
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-license: afl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/LIVE/parallel.h
DELETED
@@ -1,91 +0,0 @@
-#pragma once
-
-#include "vector.h"
-
-#include <mutex>
-#include <condition_variable>
-#include <functional>
-#include <atomic>
-#include <cstdint>
-#include <cassert>
-#include <algorithm>
-// From https://github.com/mmp/pbrt-v3/blob/master/src/core/parallel.h
-
-class Barrier {
- public:
-    Barrier(int count) : count(count) { assert(count > 0); }
-    ~Barrier() { assert(count == 0); }
-    void Wait();
-
- private:
-    std::mutex mutex;
-    std::condition_variable cv;
-    int count;
-};
-
-void parallel_for_host(const std::function<void(int64_t)> &func,
-                       int64_t count,
-                       int chunkSize = 1);
-extern thread_local int ThreadIndex;
-void parallel_for_host(
-    std::function<void(Vector2i)> func, const Vector2i count);
-int num_system_cores();
-
-void parallel_init();
-void parallel_cleanup();
-
-#ifdef __CUDACC__
-template <typename T>
-__global__ void parallel_for_device_kernel(T functor, int count) {
-    auto idx = threadIdx.x + blockIdx.x * blockDim.x;
-    if (idx >= count) {
-        return;
-    }
-    functor(idx);
-}
-template <typename T>
-inline void parallel_for_device(T functor,
-                                int count,
-                                int work_per_thread = 256) {
-    if (count <= 0) {
-        return;
-    }
-    auto block_size = work_per_thread;
-    auto block_count = idiv_ceil(count, block_size);
-    parallel_for_device_kernel<T><<<block_count, block_size>>>(functor, count);
-}
-#endif
-
-template <typename T>
-inline void parallel_for(T functor,
-                         int count,
-                         bool use_gpu,
-                         int work_per_thread = -1) {
-    if (work_per_thread == -1) {
-        work_per_thread = use_gpu ? 64 : 256;
-    }
-    if (count <= 0) {
-        return;
-    }
-    if (use_gpu) {
-#ifdef __CUDACC__
-        auto block_size = work_per_thread;
-        auto block_count = idiv_ceil(count, block_size);
-        parallel_for_device_kernel<T><<<block_count, block_size>>>(functor, count);
-#else
-        throw std::runtime_error("diffvg not compiled with GPU");
-        assert(false);
-#endif
-    } else {
-        auto num_threads = idiv_ceil(count, work_per_thread);
-        parallel_for_host([&](int thread_index) {
-            auto id_offset = work_per_thread * thread_index;
-            auto work_end = std::min(id_offset + work_per_thread, count);
-            for (int work_id = id_offset; work_id < work_end; work_id++) {
-                auto idx = work_id;
-                assert(idx < count);
-                functor(idx);
-            }
-        }, num_threads);
-    }
-}
spaces/CVPR/LIVE/thrust/thrust/detail/complex/clogf.h
DELETED
@@ -1,198 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *  Copyright 2013 Filipe RNC Maia
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/*-
- * Copyright (c) 2012 Stephen Montgomery-Smith <[email protected]>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/* adapted from FreeBSDs msun:*/
-
-#pragma once
-
-#include <thrust/complex.h>
-#include <thrust/detail/complex/math_private.h>
-
-namespace thrust{
-namespace detail{
-namespace complex{
-
-using thrust::complex;
-
-/* round down to 8 = 24/3 bits */
-__host__ __device__ inline
-float trim(float x){
-  uint32_t hx;
-  get_float_word(hx, x);
-  hx &= 0xffff0000;
-  float ret;
-  set_float_word(ret,hx);
-  return ret;
-}
-
-
-__host__ __device__ inline
-complex<float> clogf(const complex<float>& z){
-
-  // Adapted from FreeBSDs msun
-  float x, y;
-  float ax, ay;
-  float x0, y0, x1, y1, x2, y2, t, hm1;
-  float val[12];
-  int i, sorted;
-  const float e = 2.7182818284590452354f;
-
-  x = z.real();
-  y = z.imag();
-
-  /* Handle NaNs using the general formula to mix them right. */
-  if (x != x || y != y){
-    return (complex<float>(std::log(norm(z)), std::atan2(y, x)));
-  }
-
-  ax = std::abs(x);
-  ay = std::abs(y);
-  if (ax < ay) {
-    t = ax;
-    ax = ay;
-    ay = t;
-  }
-
-  /*
-   * To avoid unnecessary overflow, if x and y are very large, divide x
-   * and y by M_E, and then add 1 to the logarithm.  This depends on
-   * M_E being larger than sqrt(2).
-   * There is a potential loss of accuracy caused by dividing by M_E,
-   * but this case should happen extremely rarely.
-   */
-  // For high values of ay -> hypotf(FLT_MAX,ay) = inf
-  // We expect that for values at or below ay = 1e34f this should not happen
-  if (ay > 1e34f){
-    return (complex<float>(std::log(hypotf(x / e, y / e)) + 1.0f, std::atan2(y, x)));
-  }
-  if (ax == 1.f) {
-    if (ay < 1e-19f){
-      return (complex<float>((ay * 0.5f) * ay, std::atan2(y, x)));
-    }
-    return (complex<float>(log1pf(ay * ay) * 0.5f, std::atan2(y, x)));
-  }
-
-  /*
-   * Because atan2 and hypot conform to C99, this also covers all the
-   * edge cases when x or y are 0 or infinite.
-   */
-  if (ax < 1e-6f || ay < 1e-6f || ax > 1e6f || ay > 1e6f){
-    return (complex<float>(std::log(hypotf(x, y)), std::atan2(y, x)));
-  }
-
-  /*
-   * From this point on, we don't need to worry about underflow or
-   * overflow in calculating ax*ax or ay*ay.
-   */
-
-  /* Some easy cases. */
-
-  if (ax >= 1.0f){
-    return (complex<float>(log1pf((ax-1.f)*(ax+1.f) + ay*ay) * 0.5f, atan2(y, x)));
-  }
-
-  if (ax*ax + ay*ay <= 0.7f){
-    return (complex<float>(std::log(ax*ax + ay*ay) * 0.5f, std::atan2(y, x)));
-  }
-
-  /*
-   * Take extra care so that ULP of real part is small if hypot(x,y) is
-   * moderately close to 1.
-   */
-
-  x0 = trim(ax);
-  ax = ax-x0;
-  x1 = trim(ax);
-  x2 = ax-x1;
-  y0 = trim(ay);
-  ay = ay-y0;
-  y1 = trim(ay);
-  y2 = ay-y1;
-
-  val[0] = x0*x0;
-  val[1] = y0*y0;
-  val[2] = 2*x0*x1;
-  val[3] = 2*y0*y1;
-  val[4] = x1*x1;
-  val[5] = y1*y1;
-  val[6] = 2*x0*x2;
-  val[7] = 2*y0*y2;
-  val[8] = 2*x1*x2;
-  val[9] = 2*y1*y2;
-  val[10] = x2*x2;
-  val[11] = y2*y2;
-
-  /* Bubble sort. */
-
-  do {
-    sorted = 1;
-    for (i=0;i<11;i++) {
-      if (val[i] < val[i+1]) {
-        sorted = 0;
-        t = val[i];
-        val[i] = val[i+1];
-        val[i+1] = t;
-      }
-    }
-  } while (!sorted);
-
-  hm1 = -1;
-  for (i=0;i<12;i++){
-    hm1 += val[i];
-  }
-  return (complex<float>(0.5f * log1pf(hm1), atan2(y, x)));
-}
-
-} // namespace complex
-
-} // namespace detail
-
-template <>
-__host__ __device__
-inline complex<float> log(const complex<float>& z){
-  return detail::complex::clogf(z);
-}
-
-} // namespace thrust
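
Worth noting about `clogf` above: the real part of the complex log is half the log of the squared magnitude, and when |z| is close to 1 the naive form cancels catastrophically. The `ax >= 1.0f` branch avoids this because, in exact arithmetic,

\operatorname{Re}\log z = \tfrac{1}{2}\ln\!\left(x^{2}+y^{2}\right)
                        = \tfrac{1}{2}\,\operatorname{log1p}\!\bigl(x^{2}+y^{2}-1\bigr),
\qquad
x^{2}+y^{2}-1 = (|x|-1)(|x|+1) + y^{2},

which is exactly the `log1pf((ax-1.f)*(ax+1.f) + ay*ay) * 0.5f` expression in the code. The `trim`/bubble-sort path that follows extends the same idea by summing the split squares from largest to smallest before the final `log1pf`.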
spaces/CVPR/WALT/walt/datasets/custom.py
DELETED
@@ -1,324 +0,0 @@
-import os.path as osp
-import warnings
-from collections import OrderedDict
-
-import mmcv
-import numpy as np
-from mmcv.utils import print_log
-from torch.utils.data import Dataset
-
-from mmdet.core import eval_map, eval_recalls
-from .builder import DATASETS
-from .pipelines import Compose
-
-
-@DATASETS.register_module()
-class CustomDatasetLocal(Dataset):
-    """Custom dataset for detection.
-
-    The annotation format is shown as follows. The `ann` field is optional for
-    testing.
-
-    .. code-block:: none
-
-        [
-            {
-                'filename': 'a.jpg',
-                'width': 1280,
-                'height': 720,
-                'ann': {
-                    'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
-                    'labels': <np.ndarray> (n, ),
-                    'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
-                    'labels_ignore': <np.ndarray> (k, 4) (optional field)
-                }
-            },
-            ...
-        ]
-
-    Args:
-        ann_file (str): Annotation file path.
-        pipeline (list[dict]): Processing pipeline.
-        classes (str | Sequence[str], optional): Specify classes to load.
-            If is None, ``cls.CLASSES`` will be used. Default: None.
-        data_root (str, optional): Data root for ``ann_file``,
-            ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
-        test_mode (bool, optional): If set True, annotation will not be loaded.
-        filter_empty_gt (bool, optional): If set true, images without bounding
-            boxes of the dataset's classes will be filtered out. This option
-            only works when `test_mode=False`, i.e., we never filter images
-            during tests.
-    """
-
-    CLASSES = None
-
-    def __init__(self,
-                 ann_file,
-                 pipeline,
-                 classes=None,
-                 data_root=None,
-                 img_prefix='',
-                 seg_prefix=None,
-                 proposal_file=None,
-                 test_mode=False,
-                 filter_empty_gt=True):
-        self.ann_file = ann_file
-        self.data_root = data_root
-        self.img_prefix = img_prefix
-        self.seg_prefix = seg_prefix
-        self.proposal_file = proposal_file
-        self.test_mode = test_mode
-        self.filter_empty_gt = filter_empty_gt
-        self.CLASSES = self.get_classes(classes)
-
-        # join paths if data_root is specified
-        if self.data_root is not None:
-            if not osp.isabs(self.ann_file):
-                self.ann_file = osp.join(self.data_root, self.ann_file)
-            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
-                self.img_prefix = osp.join(self.data_root, self.img_prefix)
-            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
-                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
-            if not (self.proposal_file is None
-                    or osp.isabs(self.proposal_file)):
-                self.proposal_file = osp.join(self.data_root,
-                                              self.proposal_file)
-        # load annotations (and proposals)
-        self.data_infos = self.load_annotations(self.ann_file)
-
-        if self.proposal_file is not None:
-            self.proposals = self.load_proposals(self.proposal_file)
-        else:
-            self.proposals = None
-
-        # filter images too small and containing no annotations
-        if not test_mode:
-            valid_inds = self._filter_imgs()
-            self.data_infos = [self.data_infos[i] for i in valid_inds]
-            if self.proposals is not None:
-                self.proposals = [self.proposals[i] for i in valid_inds]
-            # set group flag for the sampler
-            self._set_group_flag()
-
-        # processing pipeline
-        self.pipeline = Compose(pipeline)
-
-    def __len__(self):
-        """Total number of samples of data."""
-        return len(self.data_infos)
-
-    def load_annotations(self, ann_file):
-        """Load annotation from annotation file."""
-        return mmcv.load(ann_file)
-
-    def load_proposals(self, proposal_file):
-        """Load proposal from proposal file."""
-        return mmcv.load(proposal_file)
-
-    def get_ann_info(self, idx):
-        """Get annotation by index.
-
-        Args:
-            idx (int): Index of data.
-
-        Returns:
-            dict: Annotation info of specified index.
-        """
-
-        return self.data_infos[idx]['ann']
-
-    def get_cat_ids(self, idx):
-        """Get category ids by index.
-
-        Args:
-            idx (int): Index of data.
-
-        Returns:
-            list[int]: All categories in the image of specified index.
-        """
-
-        return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()
-
-    def pre_pipeline(self, results):
-        """Prepare results dict for pipeline."""
-        results['img_prefix'] = self.img_prefix
-        results['seg_prefix'] = self.seg_prefix
-        results['proposal_file'] = self.proposal_file
-        results['bbox_fields'] = []
-        results['bbox3d_fields'] = []
-        results['mask_fields'] = []
-        results['seg_fields'] = []
-
-    def _filter_imgs(self, min_size=32):
-        """Filter images too small."""
-        if self.filter_empty_gt:
-            warnings.warn(
-                'CustomDataset does not support filtering empty gt images.')
-        valid_inds = []
-        for i, img_info in enumerate(self.data_infos):
-            if min(img_info['width'], img_info['height']) >= min_size:
-                valid_inds.append(i)
-        return valid_inds
-
-    def _set_group_flag(self):
-        """Set flag according to image aspect ratio.
-
-        Images with aspect ratio greater than 1 will be set as group 1,
-        otherwise group 0.
-        """
-        self.flag = np.zeros(len(self), dtype=np.uint8)
-        for i in range(len(self)):
-            img_info = self.data_infos[i]
-            if img_info['width'] / img_info['height'] > 1:
-                self.flag[i] = 1
-
-    def _rand_another(self, idx):
-        """Get another random index from the same group as the given index."""
-        pool = np.where(self.flag == self.flag[idx])[0]
-        return np.random.choice(pool)
-
-    def __getitem__(self, idx):
-        """Get training/test data after pipeline.
-
-        Args:
-            idx (int): Index of data.
-
-        Returns:
-            dict: Training/test data (with annotation if `test_mode` is set \
-                True).
-        """
-
-        if self.test_mode:
-            return self.prepare_test_img(idx)
-        while True:
-            data = self.prepare_train_img(idx)
-            if data is None:
-                idx = self._rand_another(idx)
-                continue
-            return data
-
-    def prepare_train_img(self, idx):
-        """Get training data and annotations after pipeline.
-
-        Args:
-            idx (int): Index of data.
-
-        Returns:
-            dict: Training data and annotation after pipeline with new keys \
-                introduced by pipeline.
-        """
-
-        img_info = self.data_infos[idx]
-        ann_info = self.get_ann_info(idx)
-        results = dict(img_info=img_info, ann_info=ann_info)
-        if self.proposals is not None:
-            results['proposals'] = self.proposals[idx]
-        self.pre_pipeline(results)
-        return self.pipeline(results)
-
-    def prepare_test_img(self, idx):
-        """Get testing data after pipeline.
-
-        Args:
-            idx (int): Index of data.
-
-        Returns:
-            dict: Testing data after pipeline with new keys introduced by \
-                pipeline.
-        """
-
-        img_info = self.data_infos[idx]
-        results = dict(img_info=img_info)
-        if self.proposals is not None:
-            results['proposals'] = self.proposals[idx]
-        self.pre_pipeline(results)
-        return self.pipeline(results)
-
-    @classmethod
-    def get_classes(cls, classes=None):
-        """Get class names of current dataset.
-
-        Args:
-            classes (Sequence[str] | str | None): If classes is None, use
-                default CLASSES defined by builtin dataset. If classes is a
-                string, take it as a file name. The file contains the name of
-                classes where each line contains one class name. If classes is
-                a tuple or list, override the CLASSES defined by the dataset.
-
-        Returns:
-            tuple[str] or list[str]: Names of categories of the dataset.
-        """
-        if classes is None:
-            return cls.CLASSES
-
-        if isinstance(classes, str):
-            # take it as a file path
-            class_names = mmcv.list_from_file(classes)
-        elif isinstance(classes, (tuple, list)):
-            class_names = classes
-        else:
-            raise ValueError(f'Unsupported type {type(classes)} of classes.')
-
-        return class_names
-
-    def format_results(self, results, **kwargs):
-        """Place holder to format result to dataset specific output."""
-
-    def evaluate(self,
-                 results,
-                 metric='mAP',
-                 logger=None,
-                 proposal_nums=(100, 300, 1000),
-                 iou_thr=0.5,
-                 scale_ranges=None):
-        """Evaluate the dataset.
-
-        Args:
-            results (list): Testing results of the dataset.
-            metric (str | list[str]): Metrics to be evaluated.
-            logger (logging.Logger | None | str): Logger used for printing
-                related information during evaluation. Default: None.
-            proposal_nums (Sequence[int]): Proposal number used for evaluating
-                recalls, such as recall@100, recall@1000.
-                Default: (100, 300, 1000).
-            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
-            scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
-                Default: None.
-        """
-
-        if not isinstance(metric, str):
-            assert len(metric) == 1
-            metric = metric[0]
-        allowed_metrics = ['mAP', 'recall']
-        if metric not in allowed_metrics:
-            raise KeyError(f'metric {metric} is not supported')
-        annotations = [self.get_ann_info(i) for i in range(len(self))]
-        eval_results = OrderedDict()
-        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
-        if metric == 'mAP':
-            assert isinstance(iou_thrs, list)
-            mean_aps = []
-            for iou_thr in iou_thrs:
-                print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
-                mean_ap, _ = eval_map(
-                    results,
-                    annotations,
-                    scale_ranges=scale_ranges,
-                    iou_thr=iou_thr,
-                    dataset=self.CLASSES,
-                    logger=logger)
-                mean_aps.append(mean_ap)
-                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
-            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
-        elif metric == 'recall':
-            gt_bboxes = [ann['bboxes'] for ann in annotations]
-            recalls = eval_recalls(
-                gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
-            for i, num in enumerate(proposal_nums):
-                for j, iou in enumerate(iou_thrs):
-                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
-            if recalls.shape[1] > 1:
-                ar = recalls.mean(axis=1)
-                for i, num in enumerate(proposal_nums):
-                    eval_results[f'AR@{num}'] = ar[i]
-        return eval_results
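
For reference, the annotation structure that the `CustomDatasetLocal` docstring above documents is easy to produce; a minimal, hypothetical sketch (file name and box values invented, `mmcv.dump` used as in mmdetection's own tooling):

import mmcv
import numpy as np

data_infos = [{
    'filename': 'a.jpg',
    'width': 1280,
    'height': 720,
    'ann': {
        # one box in (x1, y1, x2, y2) order, with its class label
        'bboxes': np.array([[10., 20., 200., 240.]], dtype=np.float32),
        'labels': np.array([0], dtype=np.int64),
    },
}]
mmcv.dump(data_infos, 'train_ann.pkl')  # load_annotations() reads this back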
spaces/CVPR/regionclip-demo/detectron2/data/samplers/__init__.py
DELETED
@@ -1,10 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from .distributed_sampler import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler
-from .grouped_batch_sampler import GroupedBatchSampler
-
-__all__ = [
-    "GroupedBatchSampler",
-    "TrainingSampler",
-    "InferenceSampler",
-    "RepeatFactorTrainingSampler",
-]
spaces/CVPR/regionclip-demo/detectron2/evaluation/sem_seg_evaluation.py
DELETED
@@ -1,184 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import itertools
-import json
-import logging
-import numpy as np
-import os
-from collections import OrderedDict
-import PIL.Image as Image
-import pycocotools.mask as mask_util
-import torch
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.utils.comm import all_gather, is_main_process, synchronize
-from detectron2.utils.file_io import PathManager
-
-from .evaluator import DatasetEvaluator
-
-
-class SemSegEvaluator(DatasetEvaluator):
-    """
-    Evaluate semantic segmentation metrics.
-    """
-
-    def __init__(
-        self,
-        dataset_name,
-        distributed=True,
-        output_dir=None,
-        *,
-        num_classes=None,
-        ignore_label=None,
-    ):
-        """
-        Args:
-            dataset_name (str): name of the dataset to be evaluated.
-            distributed (bool): if True, will collect results from all ranks for evaluation.
-                Otherwise, will evaluate the results in the current process.
-            output_dir (str): an output directory to dump results.
-            num_classes, ignore_label: deprecated argument
-        """
-        self._logger = logging.getLogger(__name__)
-        if num_classes is not None:
-            self._logger.warn(
-                "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata."
-            )
-        if ignore_label is not None:
-            self._logger.warn(
-                "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata."
-            )
-        self._dataset_name = dataset_name
-        self._distributed = distributed
-        self._output_dir = output_dir
-
-        self._cpu_device = torch.device("cpu")
-
-        self.input_file_to_gt_file = {
-            dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
-            for dataset_record in DatasetCatalog.get(dataset_name)
-        }
-
-        meta = MetadataCatalog.get(dataset_name)
-        # Dict that maps contiguous training ids to COCO category ids
-        try:
-            c2d = meta.stuff_dataset_id_to_contiguous_id
-            self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}
-        except AttributeError:
-            self._contiguous_id_to_dataset_id = None
-        self._class_names = meta.stuff_classes
-        self._num_classes = len(meta.stuff_classes)
-        if num_classes is not None:
-            assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}"
-        self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label
-
-    def reset(self):
-        self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
-        self._predictions = []
-
-    def process(self, inputs, outputs):
-        """
-        Args:
-            inputs: the inputs to a model.
-                It is a list of dicts. Each dict corresponds to an image and
-                contains keys like "height", "width", "file_name".
-            outputs: the outputs of a model. It is either list of semantic segmentation predictions
-                (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
-                segmentation prediction in the same format.
-        """
-        for input, output in zip(inputs, outputs):
-            output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
-            pred = np.array(output, dtype=np.int)
-            with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f:
-                gt = np.array(Image.open(f), dtype=np.int)
-
-            gt[gt == self._ignore_label] = self._num_classes
-
-            self._conf_matrix += np.bincount(
-                (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
-                minlength=self._conf_matrix.size,
-            ).reshape(self._conf_matrix.shape)
-
-            self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
-
-    def evaluate(self):
-        """
-        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
-
-        * Mean intersection-over-union averaged across classes (mIoU)
-        * Frequency Weighted IoU (fwIoU)
-        * Mean pixel accuracy averaged across classes (mACC)
-        * Pixel Accuracy (pACC)
-        """
-        if self._distributed:
-            synchronize()
-            conf_matrix_list = all_gather(self._conf_matrix)
-            self._predictions = all_gather(self._predictions)
-            self._predictions = list(itertools.chain(*self._predictions))
-            if not is_main_process():
-                return
-
-            self._conf_matrix = np.zeros_like(self._conf_matrix)
-            for conf_matrix in conf_matrix_list:
-                self._conf_matrix += conf_matrix
-
-        if self._output_dir:
-            PathManager.mkdirs(self._output_dir)
-            file_path = os.path.join(self._output_dir, "sem_seg_predictions.json")
-            with PathManager.open(file_path, "w") as f:
-                f.write(json.dumps(self._predictions))
-
-        acc = np.full(self._num_classes, np.nan, dtype=np.float)
-        iou = np.full(self._num_classes, np.nan, dtype=np.float)
-        tp = self._conf_matrix.diagonal()[:-1].astype(np.float)
-        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float)
-        class_weights = pos_gt / np.sum(pos_gt)
-        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float)
-        acc_valid = pos_gt > 0
-        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
-        iou_valid = (pos_gt + pos_pred) > 0
-        union = pos_gt + pos_pred - tp
-        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
-        macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
-        miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
-        fiou = np.sum(iou[acc_valid] * class_weights[acc_valid])
-        pacc = np.sum(tp) / np.sum(pos_gt)
-
-        res = {}
-        res["mIoU"] = 100 * miou
-        res["fwIoU"] = 100 * fiou
-        for i, name in enumerate(self._class_names):
-            res["IoU-{}".format(name)] = 100 * iou[i]
-        res["mACC"] = 100 * macc
-        res["pACC"] = 100 * pacc
-        for i, name in enumerate(self._class_names):
-            res["ACC-{}".format(name)] = 100 * acc[i]
-
-        if self._output_dir:
-            file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth")
-            with PathManager.open(file_path, "wb") as f:
-                torch.save(res, f)
-        results = OrderedDict({"sem_seg": res})
-        self._logger.info(results)
-        return results
-
-    def encode_json_sem_seg(self, sem_seg, input_file_name):
-        """
-        Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.
-        See http://cocodataset.org/#format-results
-        """
-        json_list = []
-        for label in np.unique(sem_seg):
-            if self._contiguous_id_to_dataset_id is not None:
-                assert (
-                    label in self._contiguous_id_to_dataset_id
-                ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name)
-                dataset_id = self._contiguous_id_to_dataset_id[label]
-            else:
-                dataset_id = int(label)
-            mask = (sem_seg == label).astype(np.uint8)
-            mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0]
-            mask_rle["counts"] = mask_rle["counts"].decode("utf-8")
-            json_list.append(
-                {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle}
-            )
-        return json_list
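
The metric arithmetic in `evaluate()` above boils down to a few NumPy operations on the (num_classes+1)² confusion matrix, where rows index predictions, columns index ground truth, and the extra row/column absorbs the ignore label. A self-contained sketch with a made-up two-class matrix:

import numpy as np

conf = np.array([[50.,  5., 0.],   # predicted class 0
                 [10., 30., 0.],   # predicted class 1
                 [ 0.,  0., 0.]])  # ignore-label bucket
tp = conf.diagonal()[:-1]               # per-class true positives
pos_gt = conf[:-1, :-1].sum(axis=0)     # ground-truth pixels per class
pos_pred = conf[:-1, :-1].sum(axis=1)   # predicted pixels per class
iou = tp / (pos_gt + pos_pred - tp)     # [0.769..., 0.666...]
print('mIoU:', iou.mean(), 'pACC:', tp.sum() / pos_gt.sum())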
spaces/CVPR/regionclip-demo/detectron2/modeling/proposal_generator/proposal_utils.py
DELETED
@@ -1,200 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-import math
-from typing import List, Tuple, Union
-import torch
-
-from detectron2.layers import batched_nms, cat
-from detectron2.structures import Boxes, Instances
-from detectron2.utils.env import TORCH_VERSION
-
-logger = logging.getLogger(__name__)
-
-
-def _is_tracing():
-    if torch.jit.is_scripting():
-        # https://github.com/pytorch/pytorch/issues/47379
-        return False
-    else:
-        return TORCH_VERSION >= (1, 7) and torch.jit.is_tracing()
-
-
-def find_top_rpn_proposals(
-    proposals: List[torch.Tensor],
-    pred_objectness_logits: List[torch.Tensor],
-    image_sizes: List[Tuple[int, int]],
-    nms_thresh: float,
-    pre_nms_topk: int,
-    post_nms_topk: int,
-    min_box_size: float,
-    training: bool,
-):
-    """
-    For each feature map, select the `pre_nms_topk` highest scoring proposals,
-    apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
-    highest scoring proposals among all the feature maps for each image.
-
-    Args:
-        proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4).
-            All proposal predictions on the feature maps.
-        pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
-        image_sizes (list[tuple]): sizes (h, w) for each image
-        nms_thresh (float): IoU threshold to use for NMS
-        pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
-            When RPN is run on multiple feature maps (as in FPN) this number is per
-            feature map.
-        post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
-            When RPN is run on multiple feature maps (as in FPN) this number is total,
-            over all feature maps.
-        min_box_size (float): minimum proposal box side length in pixels (absolute units
-            wrt input images).
-        training (bool): True if proposals are to be used in training, otherwise False.
-            This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
-            comment.
-
-    Returns:
-        list[Instances]: list of N Instances. The i-th Instances
-            stores post_nms_topk object proposals for image i, sorted by their
-            objectness score in descending order.
-    """
-    num_images = len(image_sizes)
-    device = proposals[0].device
-
-    # 1. Select top-k anchor for every level and every image
-    topk_scores = []  # #lvl Tensor, each of shape N x topk
-    topk_proposals = []
-    level_ids = []  # #lvl Tensor, each of shape (topk,)
-    batch_idx = torch.arange(num_images, device=device)
-    for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)):
-        Hi_Wi_A = logits_i.shape[1]
-        if isinstance(Hi_Wi_A, torch.Tensor):  # it's a tensor in tracing
-            num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk)
-        else:
-            num_proposals_i = min(Hi_Wi_A, pre_nms_topk)
-
-        # sort is faster than topk: https://github.com/pytorch/pytorch/issues/22812
-        # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
-        logits_i, idx = logits_i.sort(descending=True, dim=1)
-        topk_scores_i = logits_i.narrow(1, 0, num_proposals_i)
-        topk_idx = idx.narrow(1, 0, num_proposals_i)
-
-        # each is N x topk
-        topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 4
-
-        topk_proposals.append(topk_proposals_i)
-        topk_scores.append(topk_scores_i)
-        level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
-
-    # 2. Concat all levels together
-    topk_scores = cat(topk_scores, dim=1)
-    topk_proposals = cat(topk_proposals, dim=1)
-    level_ids = cat(level_ids, dim=0)
-
-    # 3. For each image, run a per-level NMS, and choose topk results.
-    results: List[Instances] = []
-    for n, image_size in enumerate(image_sizes):
-        boxes = Boxes(topk_proposals[n])
-        scores_per_img = topk_scores[n]
-        lvl = level_ids
-
-        valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
-        if not valid_mask.all():
-            if training:
-                raise FloatingPointError(
-                    "Predicted boxes or scores contain Inf/NaN. Training has diverged."
-                )
-            boxes = boxes[valid_mask]
-            scores_per_img = scores_per_img[valid_mask]
-            lvl = lvl[valid_mask]
-        boxes.clip(image_size)
-
-        # filter empty boxes
-        keep = boxes.nonempty(threshold=min_box_size)
-        if _is_tracing() or keep.sum().item() != len(boxes):
-            boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]
-
-        keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh)
-        # In Detectron1, there was different behavior during training vs. testing.
-        # (https://github.com/facebookresearch/Detectron/issues/459)
-        # During training, topk is over the proposals from *all* images in the training batch.
-        # During testing, it is over the proposals for each image separately.
-        # As a result, the training behavior becomes batch-dependent,
-        # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size.
-        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
-        keep = keep[:post_nms_topk]  # keep is already sorted
-
-        res = Instances(image_size)
-        res.proposal_boxes = boxes[keep]
-        res.objectness_logits = scores_per_img[keep]
-        results.append(res)
-    return results
-
-
-def add_ground_truth_to_proposals(
-    gt: Union[List[Instances], List[Boxes]], proposals: List[Instances]
-) -> List[Instances]:
-    """
-    Call `add_ground_truth_to_proposals_single_image` for all images.
-
-    Args:
-        gt(Union[List[Instances], List[Boxes]): list of N elements. Element i is a Instances
-            representing the ground-truth for image i.
-        proposals (list[Instances]): list of N elements. Element i is a Instances
-            representing the proposals for image i.
-
-    Returns:
-        list[Instances]: list of N Instances. Each is the proposals for the image,
-            with field "proposal_boxes" and "objectness_logits".
-    """
-    assert gt is not None
-
-    if len(proposals) != len(gt):
-        raise ValueError("proposals and gt should have the same length as the number of images!")
-    if len(proposals) == 0:
-        return proposals
-
-    return [
-        add_ground_truth_to_proposals_single_image(gt_i, proposals_i)
-        for gt_i, proposals_i in zip(gt, proposals)
-    ]
-
-
-def add_ground_truth_to_proposals_single_image(
-    gt: Union[Instances, Boxes], proposals: Instances
-) -> Instances:
-    """
-    Augment `proposals` with `gt`.
-
-    Args:
-        Same as `add_ground_truth_to_proposals`, but with gt and proposals
-        per image.
-
-    Returns:
-        Same as `add_ground_truth_to_proposals`, but for only one image.
-    """
-    if isinstance(gt, Boxes):
-        # convert Boxes to Instances
-        gt = Instances(proposals.image_size, gt_boxes=gt)
-
-    gt_boxes = gt.gt_boxes
-    device = proposals.objectness_logits.device
-    # Assign all ground-truth boxes an objectness logit corresponding to
-    # P(object) = sigmoid(logit) =~ 1.
-    gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
-    gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device)
-
-    # Concatenating gt_boxes with proposals requires them to have the same fields
-    gt_proposal = Instances(proposals.image_size, **gt.get_fields())
-    gt_proposal.proposal_boxes = gt_boxes
-    gt_proposal.objectness_logits = gt_logits
-
-    for key in proposals.get_fields().keys():
-        assert gt_proposal.has(
-            key
-        ), "The attribute '{}' in `proposals` does not exist in `gt`".format(key)
-
-    # NOTE: Instances.cat only use fields from the first item. Extra fields in latter items
-    # will be thrown away.
-    new_proposals = Instances.cat([proposals, gt_proposal])
-
-    return new_proposals
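
A quick check of the ground-truth logit used above: with p = 1 − 1e-10, the inverse sigmoid log(p/(1−p)) comes out to roughly 23, and pushing that back through a sigmoid recovers an objectness of effectively 1, which is exactly what the comment in the code promises. A tiny standalone sketch:

import math

p = 1.0 - 1e-10
gt_logit_value = math.log(p / (1.0 - p))              # ≈ 23.03, as in the code above
objectness = 1.0 / (1.0 + math.exp(-gt_logit_value))  # sigmoid(logit) ≈ 1.0
print(gt_logit_value, objectness)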
spaces/ChenyangSi/FreeU/free_lunch_utils.py
DELETED
@@ -1,340 +0,0 @@
-import torch
-import torch.fft as fft
-from diffusers.models.unet_2d_condition import logger
-from diffusers.utils import is_torch_version
-from typing import Any, Dict, List, Optional, Tuple, Union
-
-
-def isinstance_str(x: object, cls_name: str):
-    """
-    Checks whether x has any class *named* cls_name in its ancestry.
-    Doesn't require access to the class's implementation.
-
-    Useful for patching!
-    """
-
-    for _cls in x.__class__.__mro__:
-        if _cls.__name__ == cls_name:
-            return True
-
-    return False
-
-
-def Fourier_filter(x, threshold, scale):
-    dtype = x.dtype
-    x = x.type(torch.float32)
-    # FFT
-    x_freq = fft.fftn(x, dim=(-2, -1))
-    x_freq = fft.fftshift(x_freq, dim=(-2, -1))
-
-    B, C, H, W = x_freq.shape
-    mask = torch.ones((B, C, H, W)).cuda()
-
-    crow, ccol = H // 2, W //2
-    mask[..., crow - threshold:crow + threshold, ccol - threshold:ccol + threshold] = scale
-    x_freq = x_freq * mask
-
-    # IFFT
-    x_freq = fft.ifftshift(x_freq, dim=(-2, -1))
-    x_filtered = fft.ifftn(x_freq, dim=(-2, -1)).real
-
-    x_filtered = x_filtered.type(dtype)
-    return x_filtered
-
-
-def register_upblock2d(model):
-    def up_forward(self):
-        def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
-            for resnet in self.resnets:
-                # pop res hidden states
-                res_hidden_states = res_hidden_states_tuple[-1]
-                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
-                #print(f"in upblock2d, hidden states shape: {hidden_states.shape}")
-                hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
-
-                if self.training and self.gradient_checkpointing:
-
-                    def create_custom_forward(module):
-                        def custom_forward(*inputs):
-                            return module(*inputs)
-
-                        return custom_forward
-
-                    if is_torch_version(">=", "1.11.0"):
-                        hidden_states = torch.utils.checkpoint.checkpoint(
-                            create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
-                        )
-                    else:
-                        hidden_states = torch.utils.checkpoint.checkpoint(
-                            create_custom_forward(resnet), hidden_states, temb
-                        )
-                else:
-                    hidden_states = resnet(hidden_states, temb)
-
-            if self.upsamplers is not None:
-                for upsampler in self.upsamplers:
-                    hidden_states = upsampler(hidden_states, upsample_size)
-
-            return hidden_states
-
-        return forward
-
-    for i, upsample_block in enumerate(model.unet.up_blocks):
-        if isinstance_str(upsample_block, "UpBlock2D"):
-            upsample_block.forward = up_forward(upsample_block)
-
-
-def register_free_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2):
-    def up_forward(self):
-        def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
-            for resnet in self.resnets:
-                # pop res hidden states
-                res_hidden_states = res_hidden_states_tuple[-1]
-                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
-                #print(f"in free upblock2d, hidden states shape: {hidden_states.shape}")
-
-                # # --------------- FreeU code -----------------------
-                # # Only operate on the first two stages
-                # if hidden_states.shape[1] == 1280:
-                #     hidden_states[:,:640] = hidden_states[:,:640] * self.b1
-                #     res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)
-                # if hidden_states.shape[1] == 640:
-                #     hidden_states[:,:320] = hidden_states[:,:320] * self.b2
-                #     res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)
-                # # ---------------------------------------------------------
-
-                # --------------- FreeU code -----------------------
-                # Only operate on the first two stages
-                if hidden_states.shape[1] == 1280:
-                    hidden_mean = hidden_states.mean(1).unsqueeze(1)
-                    B = hidden_mean.shape[0]
-                    hidden_max, _ = torch.max(hidden_mean.view(B, -1), dim=-1, keepdim=True)
-                    hidden_min, _ = torch.min(hidden_mean.view(B, -1), dim=-1, keepdim=True)
-
-                    hidden_mean = (hidden_mean - hidden_min.unsqueeze(2).unsqueeze(3)) / (hidden_max - hidden_min).unsqueeze(2).unsqueeze(3)
-
-                    hidden_states[:,:640] = hidden_states[:,:640] * ((self.b1 - 1 ) * hidden_mean + 1)
-                    res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)
-                if hidden_states.shape[1] == 640:
-                    hidden_mean = hidden_states.mean(1).unsqueeze(1)
-                    B = hidden_mean.shape[0]
-                    hidden_max, _ = torch.max(hidden_mean.view(B, -1), dim=-1, keepdim=True)
-                    hidden_min, _ = torch.min(hidden_mean.view(B, -1), dim=-1, keepdim=True)
-                    hidden_mean = (hidden_mean - hidden_min.unsqueeze(2).unsqueeze(3)) / (hidden_max - hidden_min).unsqueeze(2).unsqueeze(3)
-
-                    hidden_states[:,:320] = hidden_states[:,:320] * ((self.b2 - 1 ) * hidden_mean + 1)
-                    res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)
-                # ---------------------------------------------------------
-
-                hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
-
-                if self.training and self.gradient_checkpointing:
-
-                    def create_custom_forward(module):
-                        def custom_forward(*inputs):
-                            return module(*inputs)
-
-                        return custom_forward
-
-                    if is_torch_version(">=", "1.11.0"):
-                        hidden_states = torch.utils.checkpoint.checkpoint(
-                            create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
-                        )
-                    else:
-                        hidden_states = torch.utils.checkpoint.checkpoint(
-                            create_custom_forward(resnet), hidden_states, temb
-                        )
-                else:
-                    hidden_states = resnet(hidden_states, temb)
-
-            if self.upsamplers is not None:
-                for upsampler in self.upsamplers:
-                    hidden_states = upsampler(hidden_states, upsample_size)
-
-            return hidden_states
-
-        return forward
-
-    for i, upsample_block in enumerate(model.unet.up_blocks):
-        if isinstance_str(upsample_block, "UpBlock2D"):
-            upsample_block.forward = up_forward(upsample_block)
-            setattr(upsample_block, 'b1', b1)
-            setattr(upsample_block, 'b2', b2)
-            setattr(upsample_block, 's1', s1)
-            setattr(upsample_block, 's2', s2)
-
-
-def register_crossattn_upblock2d(model):
-    def up_forward(self):
-        def forward(
-            hidden_states: torch.FloatTensor,
-            res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
-            temb: Optional[torch.FloatTensor] = None,
-            encoder_hidden_states: Optional[torch.FloatTensor] = None,
-            cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-            upsample_size: Optional[int] = None,
-            attention_mask: Optional[torch.FloatTensor] = None,
-            encoder_attention_mask: Optional[torch.FloatTensor] = None,
-        ):
-            for resnet, attn in zip(self.resnets, self.attentions):
-                # pop res hidden states
-                #print(f"in crossatten upblock2d, hidden states shape: {hidden_states.shape}")
-                res_hidden_states = res_hidden_states_tuple[-1]
-                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
-                hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
-
-                if self.training and self.gradient_checkpointing:
-
-                    def create_custom_forward(module, return_dict=None):
-                        def custom_forward(*inputs):
-                            if return_dict is not None:
-                                return module(*inputs, return_dict=return_dict)
-                            else:
-                                return module(*inputs)
-
-                        return custom_forward
-
-                    ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
-                    hidden_states = torch.utils.checkpoint.checkpoint(
-                        create_custom_forward(resnet),
-                        hidden_states,
-                        temb,
-                        **ckpt_kwargs,
-                    )
-                    hidden_states = torch.utils.checkpoint.checkpoint(
-                        create_custom_forward(attn, return_dict=False),
-                        hidden_states,
-                        encoder_hidden_states,
-                        None,  # timestep
-                        None,  # class_labels
-                        cross_attention_kwargs,
-                        attention_mask,
-                        encoder_attention_mask,
-                        **ckpt_kwargs,
-                    )[0]
-                else:
-                    hidden_states = resnet(hidden_states, temb)
-                    hidden_states = attn(
-                        hidden_states,
-                        encoder_hidden_states=encoder_hidden_states,
-                        cross_attention_kwargs=cross_attention_kwargs,
-                        attention_mask=attention_mask,
-                        encoder_attention_mask=encoder_attention_mask,
-                        return_dict=False,
-                    )[0]
-
-            if self.upsamplers is not None:
-                for upsampler in self.upsamplers:
-                    hidden_states = upsampler(hidden_states, upsample_size)
-
-            return hidden_states
-
-        return forward
-
-    for i, upsample_block in enumerate(model.unet.up_blocks):
-        if isinstance_str(upsample_block, "CrossAttnUpBlock2D"):
-            upsample_block.forward = up_forward(upsample_block)
-
-
-def register_free_crossattn_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2):
-    def up_forward(self):
-        def forward(
-            hidden_states: torch.FloatTensor,
-            res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
-            temb: Optional[torch.FloatTensor] = None,
-            encoder_hidden_states: Optional[torch.FloatTensor] = None,
-            cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-            upsample_size: Optional[int] = None,
-            attention_mask: Optional[torch.FloatTensor] = None,
-            encoder_attention_mask: Optional[torch.FloatTensor] = None,
-        ):
-            for resnet, attn in zip(self.resnets, self.attentions):
-                # pop res hidden states
-                #print(f"in free crossatten upblock2d, hidden states shape: {hidden_states.shape}")
-                res_hidden_states = res_hidden_states_tuple[-1]
-                res_hidden_states_tuple = res_hidden_states_tuple[:-1]
-
-                if self.training and self.gradient_checkpointing:
-
-                    def create_custom_forward(module, return_dict=None):
-                        def custom_forward(*inputs):
-                            if return_dict is not None:
-                                return module(*inputs, return_dict=return_dict)
-                            else:
-                                return module(*inputs)
|
290 |
-
|
291 |
-
return custom_forward
|
292 |
-
|
293 |
-
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
|
294 |
-
hidden_states = torch.utils.checkpoint.checkpoint(
|
295 |
-
create_custom_forward(resnet),
|
296 |
-
hidden_states,
|
297 |
-
temb,
|
298 |
-
**ckpt_kwargs,
|
299 |
-
)
|
300 |
-
hidden_states = torch.utils.checkpoint.checkpoint(
|
301 |
-
create_custom_forward(attn, return_dict=False),
|
302 |
-
hidden_states,
|
303 |
-
encoder_hidden_states,
|
304 |
-
None, # timestep
|
305 |
-
None, # class_labels
|
306 |
-
cross_attention_kwargs,
|
307 |
-
attention_mask,
|
308 |
-
encoder_attention_mask,
|
309 |
-
**ckpt_kwargs,
|
310 |
-
)[0]
|
311 |
-
else:
|
312 |
-
hidden_states = resnet(hidden_states, temb)
|
313 |
-
# hidden_states = attn(
|
314 |
-
# hidden_states,
|
315 |
-
# encoder_hidden_states=encoder_hidden_states,
|
316 |
-
# cross_attention_kwargs=cross_attention_kwargs,
|
317 |
-
# encoder_attention_mask=encoder_attention_mask,
|
318 |
-
# return_dict=False,
|
319 |
-
# )[0]
|
320 |
-
hidden_states = attn(
|
321 |
-
hidden_states,
|
322 |
-
encoder_hidden_states=encoder_hidden_states,
|
323 |
-
cross_attention_kwargs=cross_attention_kwargs,
|
324 |
-
)[0]
|
325 |
-
|
326 |
-
if self.upsamplers is not None:
|
327 |
-
for upsampler in self.upsamplers:
|
328 |
-
hidden_states = upsampler(hidden_states, upsample_size)
|
329 |
-
|
330 |
-
return hidden_states
|
331 |
-
|
332 |
-
return forward
|
333 |
-
|
334 |
-
for i, upsample_block in enumerate(model.unet.up_blocks):
|
335 |
-
if isinstance_str(upsample_block, "CrossAttnUpBlock2D"):
|
336 |
-
upsample_block.forward = up_forward(upsample_block)
|
337 |
-
setattr(upsample_block, 'b1', b1)
|
338 |
-
setattr(upsample_block, 'b2', b2)
|
339 |
-
setattr(upsample_block, 's1', s1)
|
340 |
-
setattr(upsample_block, 's2', s2)
|
|
|
|
|
|
|
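The three register helpers above monkey-patch the up-blocks of a loaded UNet, so registration happens once per pipeline. A minimal sketch of how the deleted module was meant to be driven (the checkpoint name and the b/s values here are illustrative, not taken from this repo):

    import torch
    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    # Patch both kinds of up-blocks with the FreeU scaling factors.
    register_free_upblock2d(pipe, b1=1.5, b2=1.6, s1=0.9, s2=0.2)
    register_free_crossattn_upblock2d(pipe, b1=1.5, b2=1.6, s1=0.9, s2=0.2)
    image = pipe("an astronaut riding a horse").images[0]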
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/tz/__init__.py
DELETED
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-from .tz import *
-from .tz import __doc__
-
-__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
-           "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
-           "enfold", "datetime_ambiguous", "datetime_exists",
-           "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"]
-
-
-class DeprecatedTzFormatWarning(Warning):
-    """Warning raised when time zones are parsed from deprecated formats."""
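This file is just the vendored entry point for dateutil's tz package; the names it re-exports are used along these lines (standard dateutil API, shown only for orientation):

    from datetime import datetime
    from dateutil import tz

    eastern = tz.gettz("America/New_York")  # IANA zone lookup
    now = datetime.now(tz.UTC)              # timezone-aware UTC timestamp
    print(now.astimezone(eastern))          # convert between zones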
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/parquet.py
DELETED
@@ -1,551 +0,0 @@
-import io
-import json
-import warnings
-
-from .core import url_to_fs
-from .utils import merge_offset_ranges
-
-# Parquet-Specific Utilities for fsspec
-#
-# Most of the functions defined in this module are NOT
-# intended for public consumption. The only exception
-# to this is `open_parquet_file`, which should be used
-# in place of `fs.open()` to open parquet-formatted files
-# on remote file systems.
-
-
-def open_parquet_file(
-    path,
-    mode="rb",
-    fs=None,
-    metadata=None,
-    columns=None,
-    row_groups=None,
-    storage_options=None,
-    strict=False,
-    engine="auto",
-    max_gap=64_000,
-    max_block=256_000_000,
-    footer_sample_size=1_000_000,
-    **kwargs,
-):
-    """
-    Return a file-like object for a single Parquet file.
-
-    The specified parquet `engine` will be used to parse the
-    footer metadata, and determine the required byte ranges
-    from the file. The target path will then be opened with
-    the "parts" (`KnownPartsOfAFile`) caching strategy.
-
-    Note that this method is intended for usage with remote
-    file systems, and is unlikely to improve parquet-read
-    performance on local file systems.
-
-    Parameters
-    ----------
-    path: str
-        Target file path.
-    mode: str, optional
-        Mode option to be passed through to `fs.open`. Default is "rb".
-    metadata: Any, optional
-        Parquet metadata object. Object type must be supported
-        by the backend parquet engine. For now, only the "fastparquet"
-        engine supports an explicit `ParquetFile` metadata object.
-        If a metadata object is supplied, the remote footer metadata
-        will not need to be transferred into local memory.
-    fs: AbstractFileSystem, optional
-        Filesystem object to use for opening the file. If nothing is
-        specified, an `AbstractFileSystem` object will be inferred.
-    engine : str, default "auto"
-        Parquet engine to use for metadata parsing. Allowed options
-        include "fastparquet", "pyarrow", and "auto". The specified
-        engine must be installed in the current environment. If
-        "auto" is specified, and both engines are installed,
-        "fastparquet" will take precedence over "pyarrow".
-    columns: list, optional
-        List of all column names that may be read from the file.
-    row_groups : list, optional
-        List of all row-groups that may be read from the file. This
-        may be a list of row-group indices (integers), or it may be
-        a list of `RowGroup` metadata objects (if the "fastparquet"
-        engine is used).
-    storage_options : dict, optional
-        Used to generate an `AbstractFileSystem` object if `fs` was
-        not specified.
-    strict : bool, optional
-        Whether the resulting `KnownPartsOfAFile` cache should
-        fetch reads that go beyond a known byte-range boundary.
-        If `False` (the default), any read that ends outside a
-        known part will be zero padded. Note that using
-        `strict=True` may be useful for debugging.
-    max_gap : int, optional
-        Neighboring byte ranges will only be merged when their
-        inter-range gap is <= `max_gap`. Default is 64KB.
-    max_block : int, optional
-        Neighboring byte ranges will only be merged when the size of
-        the aggregated range is <= `max_block`. Default is 256MB.
-    footer_sample_size : int, optional
-        Number of bytes to read from the end of the path to look
-        for the footer metadata. If the sampled bytes do not contain
-        the footer, a second read request will be required, and
-        performance will suffer. Default is 1MB.
-    **kwargs :
-        Optional keyword arguments to pass to `fs.open`
-    """
-
-    # Make sure we have an `AbstractFileSystem` object
-    # to work with
-    if fs is None:
-        fs = url_to_fs(path, **(storage_options or {}))[0]
-
-    # For now, `columns == []` not supported. Just use
-    # default `open` command with `path` input
-    if columns is not None and len(columns) == 0:
-        return fs.open(path, mode=mode)
-
-    # Set the engine
-    engine = _set_engine(engine)
-
-    # Fetch the known byte ranges needed to read
-    # `columns` and/or `row_groups`
-    data = _get_parquet_byte_ranges(
-        [path],
-        fs,
-        metadata=metadata,
-        columns=columns,
-        row_groups=row_groups,
-        engine=engine,
-        max_gap=max_gap,
-        max_block=max_block,
-        footer_sample_size=footer_sample_size,
-    )
-
-    # Extract file name from `data`
-    fn = next(iter(data)) if data else path
-
-    # Call self.open with "parts" caching
-    options = kwargs.pop("cache_options", {}).copy()
-    return fs.open(
-        fn,
-        mode=mode,
-        cache_type="parts",
-        cache_options={
-            **options,
-            **{
-                "data": data.get(fn, {}),
-                "strict": strict,
-            },
-        },
-        **kwargs,
-    )
-
-
-def _get_parquet_byte_ranges(
-    paths,
-    fs,
-    metadata=None,
-    columns=None,
-    row_groups=None,
-    max_gap=64_000,
-    max_block=256_000_000,
-    footer_sample_size=1_000_000,
-    engine="auto",
-):
-    """Get a dictionary of the known byte ranges needed
-    to read a specific column/row-group selection from a
-    Parquet dataset. Each value in the output dictionary
-    is intended for use as the `data` argument for the
-    `KnownPartsOfAFile` caching strategy of a single path.
-    """
-
-    # Set engine if necessary
-    if isinstance(engine, str):
-        engine = _set_engine(engine)
-
-    # Pass to specialized function if metadata is defined
-    if metadata is not None:
-
-        # Use the provided parquet metadata object
-        # to avoid transferring/parsing footer metadata
-        return _get_parquet_byte_ranges_from_metadata(
-            metadata,
-            fs,
-            engine,
-            columns=columns,
-            row_groups=row_groups,
-            max_gap=max_gap,
-            max_block=max_block,
-        )
-
-    # Get file sizes asynchronously
-    file_sizes = fs.sizes(paths)
-
-    # Populate global paths, starts, & ends
-    result = {}
-    data_paths = []
-    data_starts = []
-    data_ends = []
-    add_header_magic = True
-    if columns is None and row_groups is None:
-        # We are NOT selecting specific columns or row-groups.
-        #
-        # We can avoid sampling the footers, and just transfer
-        # all file data with cat_ranges
-        for i, path in enumerate(paths):
-            result[path] = {}
-            for b in range(0, file_sizes[i], max_block):
-                data_paths.append(path)
-                data_starts.append(b)
-                data_ends.append(min(b + max_block, file_sizes[i]))
-        add_header_magic = False  # "Magic" should already be included
-    else:
-        # We ARE selecting specific columns or row-groups.
-        #
-        # Gather file footers.
-        # We just take the last `footer_sample_size` bytes of each
-        # file (or the entire file if it is smaller than that)
-        footer_starts = []
-        footer_ends = []
-        for i, path in enumerate(paths):
-            footer_ends.append(file_sizes[i])
-            sample_size = max(0, file_sizes[i] - footer_sample_size)
-            footer_starts.append(sample_size)
-        footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends)
-
-        # Check our footer samples and re-sample if necessary.
-        missing_footer_starts = footer_starts.copy()
-        large_footer = 0
-        for i, path in enumerate(paths):
-            footer_size = int.from_bytes(footer_samples[i][-8:-4], "little")
-            real_footer_start = file_sizes[i] - (footer_size + 8)
-            if real_footer_start < footer_starts[i]:
-                missing_footer_starts[i] = real_footer_start
-                large_footer = max(large_footer, (footer_size + 8))
-        if large_footer:
-            warnings.warn(
-                f"Not enough data was used to sample the parquet footer. "
-                f"Try setting footer_sample_size >= {large_footer}."
-            )
-            for i, block in enumerate(
-                fs.cat_ranges(
-                    paths,
-                    missing_footer_starts,
-                    footer_starts,
-                )
-            ):
-                footer_samples[i] = block + footer_samples[i]
-                footer_starts[i] = missing_footer_starts[i]
-
-        # Calculate required byte ranges for each path
-        for i, path in enumerate(paths):
-
-            # Deal with small-file case.
-            # Just include all remaining bytes of the file
-            # in a single range.
-            if file_sizes[i] < max_block:
-                if footer_starts[i] > 0:
-                    # Only need to transfer the data if the
-                    # footer sample isn't already the whole file
-                    data_paths.append(path)
-                    data_starts.append(0)
-                    data_ends.append(footer_starts[i])
-                continue
-
-            # Use "engine" to collect data byte ranges
-            path_data_starts, path_data_ends = engine._parquet_byte_ranges(
-                columns,
-                row_groups=row_groups,
-                footer=footer_samples[i],
-                footer_start=footer_starts[i],
-            )
-
-            data_paths += [path] * len(path_data_starts)
-            data_starts += path_data_starts
-            data_ends += path_data_ends
-
-        # Merge adjacent offset ranges
-        data_paths, data_starts, data_ends = merge_offset_ranges(
-            data_paths,
-            data_starts,
-            data_ends,
-            max_gap=max_gap,
-            max_block=max_block,
-            sort=False,  # Should already be sorted
-        )
-
-        # Start by populating `result` with footer samples
-        for i, path in enumerate(paths):
-            result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]}
-
-    # Transfer the data byte-ranges into local memory
-    _transfer_ranges(fs, result, data_paths, data_starts, data_ends)
-
-    # Add b"PAR1" to header if necessary
-    if add_header_magic:
-        _add_header_magic(result)
-
-    return result
-
-
-def _get_parquet_byte_ranges_from_metadata(
-    metadata,
-    fs,
-    engine,
-    columns=None,
-    row_groups=None,
-    max_gap=64_000,
-    max_block=256_000_000,
-):
-    """Simplified version of `_get_parquet_byte_ranges` for
-    the case that an engine-specific `metadata` object is
-    provided, and the remote footer metadata does not need to
-    be transferred before calculating the required byte ranges.
-    """
-
-    # Use "engine" to collect data byte ranges
-    data_paths, data_starts, data_ends = engine._parquet_byte_ranges(
-        columns,
-        row_groups=row_groups,
-        metadata=metadata,
-    )
-
-    # Merge adjacent offset ranges
-    data_paths, data_starts, data_ends = merge_offset_ranges(
-        data_paths,
-        data_starts,
-        data_ends,
-        max_gap=max_gap,
-        max_block=max_block,
-        sort=False,  # Should be sorted
-    )
-
-    # Transfer the data byte-ranges into local memory
-    result = {fn: {} for fn in list(set(data_paths))}
-    _transfer_ranges(fs, result, data_paths, data_starts, data_ends)
-
-    # Add b"PAR1" to header
-    _add_header_magic(result)
-
-    return result
-
-
-def _transfer_ranges(fs, blocks, paths, starts, ends):
-    # Use cat_ranges to gather the data byte_ranges
-    ranges = (paths, starts, ends)
-    for path, start, stop, data in zip(*ranges, fs.cat_ranges(*ranges)):
-        blocks[path][(start, stop)] = data
-
-
-def _add_header_magic(data):
-    # Add b"PAR1" to file headers
-    for i, path in enumerate(list(data.keys())):
-        add_magic = True
-        for k in data[path].keys():
-            if k[0] == 0 and k[1] >= 4:
-                add_magic = False
-                break
-        if add_magic:
-            data[path][(0, 4)] = b"PAR1"
-
-
-def _set_engine(engine_str):
-
-    # Define a list of parquet engines to try
-    if engine_str == "auto":
-        try_engines = ("fastparquet", "pyarrow")
-    elif not isinstance(engine_str, str):
-        raise ValueError(
-            "Failed to set parquet engine! "
-            "Please pass 'fastparquet', 'pyarrow', or 'auto'"
-        )
-    elif engine_str not in ("fastparquet", "pyarrow"):
-        raise ValueError(f"{engine_str} engine not supported by `fsspec.parquet`")
-    else:
-        try_engines = [engine_str]
-
-    # Try importing the engines in `try_engines`,
-    # and choose the first one that succeeds
-    for engine in try_engines:
-        try:
-            if engine == "fastparquet":
-                return FastparquetEngine()
-            elif engine == "pyarrow":
-                return PyarrowEngine()
-        except ImportError:
-            pass
-
-    # Raise an error if a supported parquet engine
-    # was not found
-    raise ImportError(
-        f"The following parquet engines are not installed "
-        f"in your python environment: {try_engines}. "
-        f"Please install 'fastparquet' or 'pyarrow' to "
-        f"utilize the `fsspec.parquet` module."
-    )
-
-
-class FastparquetEngine:
-
-    # The purpose of the FastparquetEngine class is
-    # to check if fastparquet can be imported (on initialization)
-    # and to define a `_parquet_byte_ranges` method. In the
-    # future, this class may also be used to define other
-    # methods/logic that are specific to fastparquet.
-
-    def __init__(self):
-        import fastparquet as fp
-
-        self.fp = fp
-
-    def _row_group_filename(self, row_group, pf):
-        return pf.row_group_filename(row_group)
-
-    def _parquet_byte_ranges(
-        self,
-        columns,
-        row_groups=None,
-        metadata=None,
-        footer=None,
-        footer_start=None,
-    ):
-
-        # Initialize offset ranges and define ParquetFile metadata
-        pf = metadata
-        data_paths, data_starts, data_ends = [], [], []
-        if pf is None:
-            pf = self.fp.ParquetFile(io.BytesIO(footer))
-
-        # Convert columns to a set and add any index columns
-        # specified in the pandas metadata (just in case)
-        column_set = None if columns is None else set(columns)
-        if column_set is not None and hasattr(pf, "pandas_metadata"):
-            md_index = [
-                ind
-                for ind in pf.pandas_metadata.get("index_columns", [])
-                # Ignore RangeIndex information
-                if not isinstance(ind, dict)
-            ]
-            column_set |= set(md_index)
-
-        # Check if row_groups is a list of integers
-        # or a list of row-group metadata
-        if row_groups and not isinstance(row_groups[0], int):
-            # Input row_groups contains row-group metadata
-            row_group_indices = None
-        else:
-            # Input row_groups contains row-group indices
-            row_group_indices = row_groups
-            row_groups = pf.row_groups
-
-        # Loop through column chunks to add required byte ranges
-        for r, row_group in enumerate(row_groups):
-            # Skip this row-group if we are targeting
-            # specific row-groups
-            if row_group_indices is None or r in row_group_indices:
-
-                # Find the target parquet-file path for `row_group`
-                fn = self._row_group_filename(row_group, pf)
-
-                for column in row_group.columns:
-                    name = column.meta_data.path_in_schema[0]
-                    # Skip this column if we are targeting
-                    # specific columns
-                    if column_set is None or name in column_set:
-                        file_offset0 = column.meta_data.dictionary_page_offset
-                        if file_offset0 is None:
-                            file_offset0 = column.meta_data.data_page_offset
-                        num_bytes = column.meta_data.total_compressed_size
-                        if footer_start is None or file_offset0 < footer_start:
-                            data_paths.append(fn)
-                            data_starts.append(file_offset0)
-                            data_ends.append(
-                                min(
-                                    file_offset0 + num_bytes,
-                                    footer_start or (file_offset0 + num_bytes),
-                                )
-                            )
-
-        if metadata:
-            # The metadata in this call may map to multiple
-            # file paths. Need to include `data_paths`
-            return data_paths, data_starts, data_ends
-        return data_starts, data_ends
-
-
-class PyarrowEngine:
-
-    # The purpose of the PyarrowEngine class is
-    # to check if pyarrow can be imported (on initialization)
-    # and to define a `_parquet_byte_ranges` method. In the
-    # future, this class may also be used to define other
-    # methods/logic that are specific to pyarrow.
-
-    def __init__(self):
-        import pyarrow.parquet as pq
-
-        self.pq = pq
-
-    def _row_group_filename(self, row_group, metadata):
-        raise NotImplementedError
-
-    def _parquet_byte_ranges(
-        self,
-        columns,
-        row_groups=None,
-        metadata=None,
-        footer=None,
-        footer_start=None,
-    ):
-
-        if metadata is not None:
-            raise ValueError("metadata input not supported for PyarrowEngine")
-
-        data_starts, data_ends = [], []
-        md = self.pq.ParquetFile(io.BytesIO(footer)).metadata
-
-        # Convert columns to a set and add any index columns
-        # specified in the pandas metadata (just in case)
-        column_set = None if columns is None else set(columns)
-        if column_set is not None:
-            schema = md.schema.to_arrow_schema()
-            has_pandas_metadata = (
-                schema.metadata is not None and b"pandas" in schema.metadata
-            )
-            if has_pandas_metadata:
-                md_index = [
-                    ind
-                    for ind in json.loads(
-                        schema.metadata[b"pandas"].decode("utf8")
-                    ).get("index_columns", [])
-                    # Ignore RangeIndex information
-                    if not isinstance(ind, dict)
-                ]
-                column_set |= set(md_index)
-
-        # Loop through column chunks to add required byte ranges
-        for r in range(md.num_row_groups):
-            # Skip this row-group if we are targeting
-            # specific row-groups
-            if row_groups is None or r in row_groups:
-                row_group = md.row_group(r)
-                for c in range(row_group.num_columns):
-                    column = row_group.column(c)
-                    name = column.path_in_schema
-                    # Skip this column if we are targeting
-                    # specific columns
-                    split_name = name.split(".")[0]
-                    if (
-                        column_set is None
-                        or name in column_set
-                        or split_name in column_set
-                    ):
-                        file_offset0 = column.dictionary_page_offset
-                        if file_offset0 is None:
-                            file_offset0 = column.data_page_offset
-                        num_bytes = column.total_compressed_size
-                        if file_offset0 < footer_start:
-                            data_starts.append(file_offset0)
-                            data_ends.append(
-                                min(file_offset0 + num_bytes, footer_start)
-                            )
-        return data_starts, data_ends
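Since `open_parquet_file` is the one public entry point in the module above, a short usage sketch may help (the S3 path is a placeholder; any fsspec-compatible URL works):

    import fsspec.parquet
    import pandas as pd

    # Only the footer plus the byte ranges needed for column "x" are
    # transferred, instead of the whole remote file.
    with fsspec.parquet.open_parquet_file(
        "s3://my-bucket/data.parquet",       # hypothetical remote path
        columns=["x"],
        storage_options={"anon": True},
    ) as f:
        df = pd.read_parquet(f, columns=["x"])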
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/modules/transformer.py
DELETED
@@ -1,44 +0,0 @@
-from models.modules.transformer_modules import *
-
-
-class Transformer(nn.Module):
-    def __init__(self, dim, depth, heads, win_size, dim_head, mlp_dim,
-                 dropout=0., patch_num=None, ape=None, rpe=None, rpe_pos=1):
-        super().__init__()
-
-        self.absolute_pos_embed = None if patch_num is None or ape is None else AbsolutePosition(dim, dropout,
-                                                                                                 patch_num, ape)
-        self.pos_dropout = nn.Dropout(dropout)
-        self.layers = nn.ModuleList([])
-        for _ in range(depth):
-            self.layers.append(nn.ModuleList([
-                PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout, patch_num=patch_num,
-                                       rpe=rpe, rpe_pos=rpe_pos)),
-                PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
-            ]))
-
-    def forward(self, x):
-        if self.absolute_pos_embed is not None:
-            x = self.absolute_pos_embed(x)
-        x = self.pos_dropout(x)
-        for attn, ff in self.layers:
-            x = attn(x) + x
-            x = ff(x) + x
-        return x
-
-
-if __name__ == '__main__':
-    token_dim = 1024
-    toke_len = 256
-
-    transformer = Transformer(dim=token_dim, depth=6, heads=16,
-                              dim_head=64, mlp_dim=2048, dropout=0.1,
-                              patch_num=256, ape='lr_parameter', rpe='lr_parameter_mirror')
-
-    total = sum(p.numel() for p in transformer.parameters())
-    trainable = sum(p.numel() for p in transformer.parameters() if p.requires_grad)
-    print('parameter total:{:,}, trainable:{:,}'.format(total, trainable))
-
-    input = torch.randn(1, toke_len, token_dim)
-    output = transformer(input)
-    print(output.shape)
spaces/DevashishBhake/Face_Mask_Detection/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Face Mask Detection
-emoji: 🔥
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Docfile/open_llm_leaderboard/Makefile
DELETED
@@ -1,13 +0,0 @@
-.PHONY: style format
-
-
-style:
-	python -m black --line-length 119 .
-	python -m isort .
-	ruff check --fix .
-
-
-quality:
-	python -m black --check --line-length 119 .
-	python -m isort --check-only .
-	ruff check .
spaces/DragGan/DragGan/stylegan_human/utils/__init__.py
DELETED
File without changes
spaces/Eddycrack864/Applio-Inference/diffq/diffq.py
DELETED
@@ -1,286 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Differentiable quantizer based on scaled noise injection.
-"""
-from dataclasses import dataclass
-import math
-import typing as tp
-
-import torch
-
-from .base import BaseQuantizer
-from .uniform import uniform_quantize, uniform_unquantize
-from .utils import simple_repr
-
-
-class DiffQuantizer(BaseQuantizer):
-    @dataclass
-    class _QuantizedParam(BaseQuantizer._QuantizedParam):
-        logit: torch.nn.Parameter
-
-    def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False,
-                 group_size: int = 1, min_bits: float = 2, max_bits: float = 15,
-                 param="bits", noise="gaussian",
-                 init_bits: float = 8, extra_bits: float = 0, suffix: str = "_diffq",
-                 exclude: tp.List[str] = [], detect_bound: bool = True):
-        """
-        Differentiable quantizer based on scaled noise injection.
-        For every parameter `p` in the model, this introduces a number of bits parameter
-        `b` with the same dimensions (when group_size = 1).
-        Before each forward, `p` is replaced by `p + U`
-        with U uniform iid noise with range [-d/2, d/2], with `d` the uniform quantization
-        step for `b` bits.
-        This noise approximates the quantization noise in a differentiable manner, both
-        with respect to the unquantized parameter `p` and the number of bits `b`.
-
-        At evaluation (as detected with `model.eval()`), the model is replaced
-        by its true quantized version, and restored when going back to training.
-
-        When doing actual quantization (for serialization, or evaluation),
-        the number of bits is rounded to the nearest integer, and needs to be stored along.
-        This will cost a few bits per dimension. To reduce this cost, one can use `group_size`,
-        which will use a single noise level for multiple weight entries.
-
-        You can use the `DiffQuantizer.model_size` method to get a differentiable estimate of the
-        model size in MB. You can then use this estimate as a penalty in your training loss.
-
-        Args:
-            model (torch.nn.Module): model to quantize
-            min_size (float): minimum size in MB of a parameter to be quantized.
-            float16 (bool): if a layer is smaller than min_size, should we still do float16?
-            group_size (int): weight entries are grouped together to reduce the number
-                of noise scales to store. This should divide the size of all parameters
-                bigger than min_size.
-            min_bits (float): minimal number of bits.
-            max_bits (float): maximal number of bits.
-            init_bits (float): initial number of bits.
-            extra_bits (float): extra bits to add for actual quantization (before roundoff).
-            suffix (str): suffix used for the name of the extra noise scale parameters.
-            exclude (list[str]): list of patterns used to match parameters to exclude.
-                For instance `['bias']` to exclude all bias terms.
-            detect_bound (bool): if True, will detect bound parameters and reuse
-                the same quantized tensor for both, as well as the same number of bits.
-
-        ..Warning::
-            You must call `model.training()` and `model.eval()` for `DiffQuantizer` to work properly.
-
-        """
-        self.group_size = group_size
-        self.min_bits = min_bits
-        self.max_bits = max_bits
-        self.init_bits = init_bits
-        self.extra_bits = extra_bits
-        self.suffix = suffix
-        self.param = param
-        self.noise = noise
-        assert noise in ["gaussian", "uniform"]
-        self._optimizer_setup = False
-
-        self._min_noise = 1 / (2 ** self.max_bits - 1)
-        self._max_noise = 1 / (2 ** self.min_bits - 1)
-
-        assert group_size >= 0
-        assert min_bits < init_bits < max_bits, \
-            "init_bits must be between min_bits and max_bits excluded"
-
-        for name, _ in model.named_parameters():
-            if name.endswith(suffix):
-                raise RuntimeError("The model already has some noise scales parameters, "
-                                   "maybe you used twice a DiffQuantizer on the same model?")
-
-        super().__init__(model, min_size, float16, exclude, detect_bound)
-
-    def _get_bits(self, logit: torch.Tensor):
-        if self.param == "noise":
-            return torch.log2(1 + 1 / self._get_noise_scale(logit))
-        else:
-            t = torch.sigmoid(logit)
-            return self.max_bits * t + (1 - t) * self.min_bits
-
-    def _get_noise_scale(self, logit: torch.Tensor):
-        if self.param == "noise":
-            t = torch.sigmoid(logit)
-            return torch.exp(t * math.log(self._min_noise) + (1 - t) * math.log(self._max_noise))
-        else:
-            return 1 / (2 ** self._get_bits(logit) - 1)
-
-    def _register_param(self, name, param, module, other):
-        if other is not None:
-            return self.__class__._QuantizedParam(
-                name=name, param=param, module=module, logit=other.logit, other=other)
-        assert self.group_size == 0 or param.numel() % self.group_size == 0
-        # we want the initial number of bits to be init_bits.
-        if self.param == "noise":
-            noise_scale = 1 / (2 ** self.init_bits - 1)
-            t = (math.log(noise_scale) - math.log(self._max_noise)) / (
-                math.log(self._min_noise) - math.log(self._max_noise))
-        else:
-            t = (self.init_bits - self.min_bits) / (self.max_bits - self.min_bits)
-        assert 0 < t < 1
-        logit = torch.logit(torch.tensor(float(t)))
-        assert abs(self._get_bits(logit) - self.init_bits) < 1e-5
-        if self.group_size > 0:
-            nparam = param.numel() // self.group_size
-        else:
-            nparam = 1
-        logit = torch.nn.Parameter(
-            torch.full(
-                (nparam,),
-                logit,
-                device=param.device))
-        module.register_parameter(name + self.suffix, logit)
-        return self.__class__._QuantizedParam(
-            name=name, param=param, module=module, logit=logit, other=None)
-
-    def clear_optimizer(self, optimizer: torch.optim.Optimizer):
-        params = [qp.logit for qp in self._qparams]
-
-        for group in optimizer.param_groups:
-            new_params = []
-            for q in list(group["params"]):
-                matched = False
-                for p in params:
-                    if p is q:
-                        matched = True
-                if not matched:
-                    new_params.append(q)
-            group["params"][:] = new_params
-
-    def setup_optimizer(self, optimizer: torch.optim.Optimizer,
-                        lr: float = 1e-3, **kwargs):
-        """
-        Setup the optimizer to tune the number of bits. In particular, this will deactivate
-        weight decay for the bits parameters.
-
-        Args:
-            optimizer (torch.Optimizer): optimizer to use.
-            lr (float): specific learning rate for the bits parameters. 1e-3
-                is perfect for Adam.
-            kwargs (dict): overrides for other optimization parameters for the bits.
-        """
-        assert not self._optimizer_setup
-        self._optimizer_setup = True
-
-        params = [qp.logit for qp in self._qparams]
-
-        for group in optimizer.param_groups:
-            for q in list(group["params"]):
-                for p in params:
-                    if p is q:
-                        raise RuntimeError("You should create the optimizer "
-                                           "before the quantizer!")
-
-        group = {"params": params, "lr": lr, "weight_decay": 0}
-        group.update(kwargs)
-        optimizer.add_param_group(group)
-
-    def no_optimizer(self):
-        """
-        Call this if you do not want to use an optimizer.
-        """
-        self._optimizer_setup = True
-
-    def check_unused(self):
-        for qparam in self._qparams:
-            if qparam.other is not None:
-                continue
-            grad = qparam.param.grad
-            if grad is None or (grad == 0).all():
-                if qparam.logit.grad is not None:
-                    qparam.logit.grad.data.zero_()
-
-    def model_size(self, exact=False):
-        """
-        Differentiable estimate of the model size.
-        The size is returned in MB.
-
-        If `exact` is True, then the output is no longer differentiable but
-        reflects exactly an achievable size, even without compression,
-        i.e. the same as returned by `naive_model_size()`.
-        """
-        total = super().model_size()
-        subtotal = 0
-        for qparam in self._qparams:
-            # only count the first appearance of a Parameter
-            if qparam.other is not None:
-                continue
-            bits = self.extra_bits + self._get_bits(qparam.logit)
-            if exact:
-                bits = bits.round().clamp(1, 15)
-            if self.group_size == 0:
-                group_size = qparam.param.numel()
-            else:
-                group_size = self.group_size
-            subtotal += group_size * bits.sum()
-            subtotal += 2 * 32  # param scale
-
-            # Number of bits to represent each number of bits
-            bits_bits = math.ceil(math.log2(1 + (bits.max().round().item() - self.min_bits)))
-            subtotal += 8  # 8 bits for bits_bits
-            subtotal += bits_bits * bits.numel()
-
-        subtotal /= 2 ** 20 * 8  # bits -> MegaBytes
-        return total + subtotal
-
-    def true_model_size(self):
-        """
-        Naive model size without zlib compression.
-        """
-        return self.model_size(exact=True).item()
-
-    def _pre_forward_train(self):
-        if not self._optimizer_setup:
-            raise RuntimeError("You must call `setup_optimizer()` on your optimizer "
-                               "before starting training.")
-        for qparam in self._qparams:
-            if qparam.other is not None:
-                noisy = qparam.other.module._parameters[qparam.other.name]
-            else:
-                bits = self._get_bits(qparam.logit)[:, None]
-                if self.group_size == 0:
-                    p_flat = qparam.param.view(-1)
-                else:
-                    p_flat = qparam.param.view(-1, self.group_size)
-                scale = p_flat.max() - p_flat.min()
-                unit = 1 / (2**bits - 1)
-                if self.noise == "uniform":
-                    noise_source = (torch.rand_like(p_flat) - 0.5)
-                elif self.noise == "gaussian":
-                    noise_source = torch.randn_like(p_flat) / 2
-                noise = scale * unit * noise_source
-                noisy = p_flat + noise
-            # We bypass the checks by PyTorch on parameters being leafs
-            qparam.module._parameters[qparam.name] = noisy.view_as(qparam.param)
-        return True
-
-    def _post_forward_train(self):
-        for qparam in self._qparams:
-            qparam.module._parameters[qparam.name] = qparam.param
-        return True
-
-    def _quantize_param(self, qparam: _QuantizedParam) -> tp.Any:
-        bits = self.extra_bits + self._get_bits(qparam.logit)
-        bits = bits.round().clamp(1, 15)[:, None].byte()
-        if self.group_size == 0:
-            p = qparam.param.data.view(-1)
-        else:
-            p = qparam.param.data.view(-1, self.group_size)
-        levels, scales = uniform_quantize(p, bits)
-        return levels, scales, bits
-
-    def _unquantize_param(self, qparam: _QuantizedParam, quantized: tp.Any) -> torch.Tensor:
-        levels, param_scale, bits = quantized
-        return uniform_unquantize(levels, param_scale, bits).view_as(qparam.param.data)
-
-    def detach(self):
-        super().detach()
-        for qparam in self._qparams:
-            delattr(qparam.module, qparam.name + self.suffix)
-
-    def __repr__(self):
-        return simple_repr(self)
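Per the docstring of `DiffQuantizer`, the intended training loop creates the optimizer first, registers the quantizer, and penalizes the differentiable size estimate. A minimal sketch with a placeholder model and penalty weight:

    import torch
    from diffq import DiffQuantizer

    model = torch.nn.Linear(1024, 1024)              # placeholder model
    optimizer = torch.optim.Adam(model.parameters())
    quantizer = DiffQuantizer(model)                 # create after the optimizer
    quantizer.setup_optimizer(optimizer)             # adds the bits parameters

    model.train()
    x = torch.randn(8, 1024)
    loss = model(x).pow(2).mean()                    # stand-in task loss
    loss = loss + 1e-3 * quantizer.model_size()      # differentiable size penalty (MB)
    loss.backward()
    optimizer.step()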