parquet-converter committed
Commit 02cee30 · 1 Parent(s): 72cad55

Update parquet files (step 44 of 476)

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/README.md +0 -110
  2. spaces/101-5/gpt4free/testing/binghuan/testing.py +0 -31
  3. spaces/1gistliPinn/ChatGPT4/Examples/Adobe After Effects Cc 2014 Crack Amtlib.dll.md +0 -38
  4. spaces/1line/AutoGPT/autogpt/agent/agent_manager.py +0 -103
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/COD Warzone Vondel Map High Stakes Event and More - Download Today.md +0 -117
  6. spaces/1phancelerku/anime-remove-background/Download Ludo Nasa Now and Enjoy the Best Mobile Game of the Year.md +0 -132
  7. spaces/1phancelerku/anime-remove-background/Download QS Ar-Rahman - The Surah that Will Make You Cry.md +0 -174
  8. spaces/2ndelement/voicevox/voicevox_engine/dev/synthesis_engine/__init__.py +0 -3
  9. spaces/4Taps/SadTalker/src/utils/preprocess.py +0 -152
  10. spaces/801artistry/RVC801/rvc_for_realtime.py +0 -297
  11. spaces/AIFILMS/StyleGANEX/app.py +0 -124
  12. spaces/AIFILMS/StyleGANEX/configs/__init__.py +0 -0
  13. spaces/AIKey/ai_date/style.css +0 -28
  14. spaces/AIZero2HeroBootcamp/AnimatedGifGallery/app.py +0 -52
  15. spaces/ARTeLab/ARTeLab-SummIT/README.md +0 -30
  16. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/transformer.py +0 -747
  17. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/login/+page.server.ts +0 -16
  18. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/phind.py +0 -69
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/utils/ClearChildren.js +0 -29
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/Slider.d.ts +0 -58
  21. spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/NLP/utilities.pm +0 -0
  22. spaces/AlekseyKorshuk/model-evaluation/app.py +0 -230
  23. spaces/AlexWortega/AlexWortega-instruct_rugptlarge/README.md +0 -12
  24. spaces/Alpaca233/ChatGPT-PPT-Generate/README.md +0 -14
  25. spaces/Amon1/ChatGPTForAcadamic/crazy_functions/解析项目源代码.py +0 -213
  26. spaces/Andres99/Tune-A-Video-Training-UI/README.md +0 -12
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/lms_discrete.md +0 -20
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +0 -657
  29. spaces/Andy1621/uniformer_image_detection/mmdet/core/export/pytorch2onnx.py +0 -154
  30. spaces/AnnasBlackHat/Image-Similarity/app.py +0 -32
  31. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_h32.py +0 -39
  32. spaces/Anustup/NS_AI_LABS/app-local.py +0 -3
  33. spaces/Arcader7171/positive/README.md +0 -12
  34. spaces/Armored-Atom/gpt2/app.py +0 -3
  35. spaces/Artrajz/vits-simple-api/static/css/style.css +0 -84
  36. spaces/AvaterClasher/Food_Classifier_Refined_MONI/README.md +0 -13
  37. spaces/Awesimo/jojogan/app.py +0 -124
  38. spaces/Awesimo/jojogan/e4e/editings/ganspace.py +0 -22
  39. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/visualizer.py +0 -1267
  40. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/iou_loss.py +0 -121
  41. spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_61968KB.py +0 -122
  42. spaces/Benson/text-generation/Examples/Base-1.apk.md +0 -53
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_py.py +0 -407
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/queue.py +0 -22
  45. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h +0 -115
  46. spaces/CVPR/LIVE/thrust/thrust/device_new.h +0 -88
  47. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par_to_seq.h +0 -91
  48. spaces/CVPR/LIVE/thrust/thrust/uninitialized_fill.h +0 -275
  49. spaces/CVPR/WALT/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py +0 -55
  50. spaces/CVPR/WALT/train.py +0 -191
spaces/101-5/gpt4free/g4f/.v1/gpt4free/README.md DELETED
@@ -1,110 +0,0 @@
- # gpt4free package
-
- ### What is it?
-
- gpt4free is a Python package that provides access to several language-model APIs.
-
- ### Main Features
-
- - It's free to use
- - Easy access
-
- ### Installation:
-
- ```bash
- pip install gpt4free
- ```
-
- #### Usage:
-
- ```python
- import gpt4free
- from gpt4free import Provider, quora, forefront
-
- # usage You
- response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
- print(response)
-
- # usage Poe
- token = quora.Account.create(logging=False)
- response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT')
- print(response)
-
- # usage forefront
- token = forefront.Account.create(logging=False)
- response = gpt4free.Completion.create(
-     Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token
- )
- print(response)
- print('END')
-
- # usage theb
- response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
- print(response)
- ```
-
- ### Invocation Arguments
-
- The `gpt4free.Completion.create()` method has two required arguments:
-
- 1. `Provider`: an enum representing the different providers
- 2. `prompt`: the user input
-
- #### Keyword Arguments
-
- Some of the keyword arguments are optional, while others are required.
-
- - You:
-   - `safe_search`: boolean - default value is `False`
-   - `include_links`: boolean - default value is `False`
-   - `detailed`: boolean - default value is `False`
- - Quora:
-   - `token`: str - this needs to be provided by the user
-   - `model`: str - default value is `gpt-4`.
-     (Available models: `['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI']`)
- - ForeFront:
-   - `token`: str - this needs to be provided by the user
- - Theb:
-   (no keyword arguments required)
-
- #### Token generation of quora
- ```python
- from gpt4free import quora
-
- token = quora.Account.create(logging=False)
- ```
-
- ### Token generation of ForeFront
- ```python
- from gpt4free import forefront
-
- token = forefront.Account.create(logging=False)
- ```
-
- ## Copyright:
-
- This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt)
-
- ### Copyright Notice: <a name="copyright"></a>
-
- ```
- xtekky/gpt4free: multiple reverse engineered language-model api's to decentralise the ai industry.
- Copyright (C) 2023 xtekky
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
- ```
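For reference, a minimal sketch of how the keyword arguments documented in this deleted README would be passed to `gpt4free.Completion.create()`. The provider names, argument names, and defaults are taken from the README itself; that the removed package and these providers are still importable is an assumption.

```python
# Minimal sketch of the keyword arguments listed in the README above
# (names and defaults come from that README; availability is an assumption).
import gpt4free
from gpt4free import Provider, quora

# You provider: all three keyword arguments are optional booleans (default False)
response = gpt4free.Completion.create(
    Provider.You,
    prompt='Write a poem on Lionel Messi',
    safe_search=False,
    include_links=True,
    detailed=False,
)
print(response)

# Quora (Poe) provider: `token` is required, `model` defaults to 'gpt-4'
token = quora.Account.create(logging=False)
response = gpt4free.Completion.create(
    Provider.Poe,
    prompt='Write a poem on Lionel Messi',
    token=token,
    model='ChatGPT',
)
print(response)
```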
spaces/101-5/gpt4free/testing/binghuan/testing.py DELETED
@@ -1,31 +0,0 @@
- from BingHuan import ChatCompletion
-
- # Test 1
- response = ChatCompletion.create(model="gpt-3.5-turbo",
-                                  provider="BingHuan",
-                                  stream=False,
-                                  messages=[{'role': 'user', 'content': 'who are you?'}])
-
- print(response)
-
- # Test 2
- # this prompt will return emoji in end of response
- response = ChatCompletion.create(model="gpt-3.5-turbo",
-                                  provider="BingHuan",
-                                  stream=False,
-                                  messages=[{'role': 'user', 'content': 'what you can do?'}])
-
- print(response)
-
-
- # Test 3
- response = ChatCompletion.create(model="gpt-4",
-                                  provider="BingHuan",
-                                  stream=False,
-                                  messages=[
-                                      {'role': 'user', 'content': 'now your name is Bob'},
-                                      {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
-                                      {'role': 'user', 'content': 'what your name again?'},
-                                  ])
-
- print(response)
spaces/1gistliPinn/ChatGPT4/Examples/Adobe After Effects Cc 2014 Crack Amtlib.dll.md DELETED
@@ -1,38 +0,0 @@
- <h2>adobe after effects cc 2014 crack amtlib.dll</h2><br /><p><b><b>DOWNLOAD</b> &mdash; <a href="https://imgfil.com/2uy1W2">https://imgfil.com/2uy1W2</a></b></p><br /><br />
- <br />
- not found
-
- I'm running Adobe After Effects CC 2014 on a windows 7 system 64bit.
-
- I'm trying to add a CS6 project to this installation. It is running fine but everytime I try to add a css file or stylesheet it fails saying amtlib.dll was not found. I am running the 64bit OS. I've looked through other threads here and I've tried to:
-
- Add the libraries to the Adobe directory located in C:\Program Files\Adobe\Adobe After Effects CC 2014
-
- Create a symbolic link pointing to C:\Program Files\Adobe\Adobe After Effects CC 2014\amtlib.dll
-
- Restart computer
-
- Nothing seems to work. Any thoughts? Any further help is appreciated. Thank you.
-
- A:
-
- In my case Adobe added the dll in the wrong folder. Where it was pointing to is the Adobe Shared\amtlib.dll, if you delete this folder and open the installation folder and make the symbolic link again, it will work.
-
- Pages
-
- Thursday, May 14, 2012
-
- Thursday Thirteen - Next chapter!
-
- And that is the end of this story. It's been a good ride, but I think it's time for me to move on to other projects. But, what projects?
-
- Next story is going to be written by my buddy Gary Marti. Gary lives about thirty-five miles away from me in a little city in Texas named Oasis. He and I went to school together (seven years) and have been friends since. His wife, Kari, and I have been friends as well.
-
- While I've known Gary for many years, I'm really looking forward to sharing a great friendship with him. Gary and I have been discussing a story and I'm excited that he's going to write it for me. I'm even more excited that I can write along side Gary and we'll take turns with each chapter. Gary has been taking his time in working on the chapter, so he doesn't have any chapters in writing yet.
-
- I'm not telling you anything about this story except for the fact that it will involve a sports team and a man that will determine the fate of the team. And, just as important, he will determine the fate of the man.
-
- Right now, I'm thinking of some of my writing projects and decided that I'm going to write a short story about 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1line/AutoGPT/autogpt/agent/agent_manager.py DELETED
@@ -1,103 +0,0 @@
- """Agent manager for managing GPT agents"""
- from __future__ import annotations
-
- from typing import Union
-
- from autogpt.config.config import Singleton
- from autogpt.llm_utils import create_chat_completion
-
-
- class AgentManager(metaclass=Singleton):
-     """Agent manager for managing GPT agents"""
-
-     def __init__(self):
-         self.next_key = 0
-         self.agents = {}  # key, (task, full_message_history, model)
-
-     # Create new GPT agent
-     # TODO: Centralise use of create_chat_completion() to globally enforce token limit
-
-     def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
-         """Create a new agent and return its key
-
-         Args:
-             task: The task to perform
-             prompt: The prompt to use
-             model: The model to use
-
-         Returns:
-             The key of the new agent
-         """
-         messages = [
-             {"role": "user", "content": prompt},
-         ]
-
-         # Start GPT instance
-         agent_reply = create_chat_completion(
-             model=model,
-             messages=messages,
-         )
-
-         # Update full message history
-         messages.append({"role": "assistant", "content": agent_reply})
-
-         key = self.next_key
-         # This is done instead of len(agents) to make keys unique even if agents
-         # are deleted
-         self.next_key += 1
-
-         self.agents[key] = (task, messages, model)
-
-         return key, agent_reply
-
-     def message_agent(self, key: str | int, message: str) -> str:
-         """Send a message to an agent and return its response
-
-         Args:
-             key: The key of the agent to message
-             message: The message to send to the agent
-
-         Returns:
-             The agent's response
-         """
-         task, messages, model = self.agents[int(key)]
-
-         # Add user message to message history before sending to agent
-         messages.append({"role": "user", "content": message})
-
-         # Start GPT instance
-         agent_reply = create_chat_completion(
-             model=model,
-             messages=messages,
-         )
-
-         # Update full message history
-         messages.append({"role": "assistant", "content": agent_reply})
-
-         return agent_reply
-
-     def list_agents(self) -> list[tuple[str | int, str]]:
-         """Return a list of all agents
-
-         Returns:
-             A list of tuples of the form (key, task)
-         """
-
-         # Return a list of agent keys and their tasks
-         return [(key, task) for key, (task, _, _) in self.agents.items()]
-
-     def delete_agent(self, key: Union[str, int]) -> bool:
-         """Delete an agent from the agent manager
-
-         Args:
-             key: The key of the agent to delete
-
-         Returns:
-             True if successful, False otherwise
-         """
-
-         try:
-             del self.agents[int(key)]
-             return True
-         except KeyError:
-             return False
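For reference, a minimal usage sketch of the `AgentManager` class removed above. It assumes the surrounding AutoGPT package (`autogpt.llm_utils`, `autogpt.config`) is installed and configured with a valid API key; the model name is illustrative.

```python
# Minimal usage sketch for the AgentManager shown above (assumes the AutoGPT
# package is installed and an API key is configured; model name is illustrative).
from autogpt.agent.agent_manager import AgentManager

manager = AgentManager()  # Singleton metaclass: repeated calls return the same instance

# Spawn a sub-agent; returns its integer key and its first reply
key, first_reply = manager.create_agent(
    task="Summarise a document",
    prompt="You are a summarisation agent. Reply READY when prepared.",
    model="gpt-3.5-turbo",
)
print(key, first_reply)

# Continue the conversation with the same agent via its key
print(manager.message_agent(key, "Summarise: Ludo is a board game derived from Pachisi."))

print(manager.list_agents())      # [(key, task), ...]
print(manager.delete_agent(key))  # True if the key existed, False otherwise
```

Because the class uses the `Singleton` metaclass, every `AgentManager()` call returns the same instance, so agent keys stay unique across the whole process even after deletions.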
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/COD Warzone Vondel Map High Stakes Event and More - Download Today.md DELETED
@@ -1,117 +0,0 @@
- <br />
- <h1>How to Download and Play COD Warzone: A Complete Guide</h1>
- <p>If you are looking for a thrilling and action-packed battle royale game, you might want to check out COD Warzone. This free-to-play game is set in the Modern Warfare universe and offers a variety of modes, features, and challenges to keep you entertained. In this guide, we will show you how to download and play COD Warzone on PC, PS4, and Xbox One, as well as give you some tips and tricks to help you win.</p>
- <h2>cod warzone download</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://urlin.us/2uT1zl">https://urlin.us/2uT1zl</a></b></p><br /><br />
- <h2>What is COD Warzone?</h2>
- <h3>A free-to-play battle royale game set in the Modern Warfare universe</h3>
- <p>COD Warzone is a spin-off of the popular Call of Duty franchise, developed by Infinity Ward and Raven Software. It was released in March 2020 as a standalone game that does not require any previous Call of Duty titles to play. It is also cross-platform, meaning that you can play with your friends regardless of what device they are using.</p>
- <p>COD Warzone is set in Verdansk, a fictional city inspired by real-world locations in Eastern Europe. The game features over 300 points of interest, multiple named zones, and distinct landmarks to explore. The map is constantly evolving with new updates, events, and seasons that introduce new content and changes.</p>
- <h3>The main features and modes of COD Warzone</h3>
- <h4>Battle Royale: Survive against up to 150 players in a shrinking map</h4>
- <p>The core mode of COD Warzone is Battle Royale, where you can play solo or in teams of two, three, or four. Your goal is to be the last one standing out of up to 150 players who parachute into the map. You have to scavenge for weapons, equipment, cash, and contracts that give you objectives and rewards. You also have to avoid the gas that closes in on the map over time, forcing you to move to safer zones.</p>
- <p>One of the unique features of COD Warzone's Battle Royale is the Gulag. When you die for the first time in a match, you are sent to the Gulag, where you have a chance to fight another fallen player in a 1v1 match. The winner gets to redeploy back into the game, while the loser is eliminated. You can also be revived by your teammates or buy back your teammates at Buy Stations if they have enough cash.</p>
- <h4>Plunder: Collect cash and loot in a race to reach $1 million</h4>
- <p>If you prefer a more casual and less stressful mode, you can try Plunder. In this mode, you can play in teams of two, three, or four, and your goal is to collect as much cash as possible by looting, completing contracts, killing enemies, or depositing at helipads or balloons. The first team to reach $1 million triggers overtime, where the cash values are doubled and the team with the most cash at the end wins. You can respawn unlimited times in this mode, but you lose some of your cash when you die. You can also loot cash from other players or steal their deposits.</p>
- <h4>Strongholds: Raid AI-protected buildings for high-tier loot and rewards</h4>
- <p>A new mode that was added in Season 6 of COD Warzone is Strongholds. In this mode, you can play in teams of two, three, or four, and your goal is to raid buildings that are guarded by AI enemies. These buildings contain high-tier loot, such as legendary weapons, killstreaks, and armor satchels. You also get rewards for clearing each floor and reaching the rooftop, where you can find a helicopter that will take you to the next stronghold.</p>
- <p>cod warzone download size<br />
- cod warzone download pc free<br />
- cod warzone download ps4<br />
- cod warzone download xbox one<br />
- cod warzone download time<br />
- cod warzone download error<br />
- cod warzone download stuck<br />
- cod warzone download slow<br />
- cod warzone download season 4<br />
- cod warzone download not working<br />
- cod warzone download update<br />
- cod warzone download link<br />
- cod warzone download requirements<br />
- cod warzone download mac<br />
- cod warzone download steam<br />
- cod warzone download speed<br />
- cod warzone download without modern warfare<br />
- cod warzone download problem<br />
- cod warzone download vondel map<br />
- cod warzone download free to play<br />
- cod warzone download latest version<br />
- cod warzone download offline<br />
- cod warzone download for android<br />
- cod warzone download for windows 10<br />
- cod warzone download for laptop<br />
- cod warzone download from activision website[^1^]<br />
- cod warzone download from playstation store[^2^]<br />
- cod warzone download from xbox store[^2^]<br />
- cod warzone download from battle.net[^2^]<br />
- cod warzone download from steam[^2^]<br />
- cod warzone download high stakes event[^1^]<br />
- cod warzone download tactical amphibious vehicle[^1^]<br />
- cod warzone download ricochet anti-cheat[^1^]<br />
- cod warzone download blackcell sector[^1^]<br />
- cod warzone download tips and tricks[^3^]<br />
- cod warzone download best settings[^3^]<br />
- cod warzone download best weapons[^3^]<br />
- cod warzone download best loadouts[^3^]<br />
- cod warzone download best operators[^3^]<br />
- cod warzone download best perks[^3^]</p>
- <p>However, you are not alone in this mode. Other teams can also enter the same stronghold and compete with you for the loot and rewards. You can also encounter other teams on your way to the next stronghold or at the extraction point. You have to balance between speed and stealth, as well as teamwork and strategy, to survive and win this mode.</p>
- <h4>Black Sites: Explore mysterious locations for secrets and surprises</h4>
- <p>Another new feature that was added in Season 6 of COD Warzone is Black Sites. These are hidden locations that are scattered around the map and can only be accessed by finding and activating red access cards. These cards can be found by looting crates, completing contracts, or killing enemies. Once you activate a card, you can enter a black site and explore its secrets and surprises.</p>
- <p>Black sites contain rare loot, such as specialist tokens, juggernaut suits, advanced UAVs, and self-revive kits. They also have clues and hints about the lore and story of COD Warzone, as well as Easter eggs and puzzles that can unlock rewards or trigger events. Some black sites are more dangerous than others, as they may have traps, alarms, or enemies waiting for you. You also have to watch out for other players who may follow you or ambush you at the black sites.</p>
- <h2>How to download COD Warzone on PC, PS4, and Xbox One</h2>
- <h3>PC: Download the Battle.net launcher and install the game</h3>
- <p>If you want to play COD Warzone on PC, you need to download the Battle.net launcher from the official website of Blizzard Entertainment. This is a free platform that allows you to access and play games developed by Blizzard or its partners, such as COD Warzone. Once you download and install the launcher, you need to create an account or log in with an existing one.</p>
- <p>After that, you can find COD Warzone in the Games tab of the launcher. You can click on it and then click on Install to start downloading the game. The game size is about 100 GB, so make sure you have enough space and a stable internet connection. You can also adjust the download settings and preferences in the launcher.</p>
- <h4>The system requirements for PC</h4>
- <p>Before you download COD Warzone on PC, you should check if your system meets the minimum or recommended requirements for the game. Here are the system requirements according to the official website of COD Warzone:</p>
- | Minimum | Recommended |
- | --- | --- |
- | OS: Windows 7 64-Bit (SP1) or Windows 10 64-Bit | OS: Windows 10 64 Bit (latest update) |
- | CPU: Intel Core i3-4340 or AMD FX-6300 | CPU: Intel Core i5-2500K or AMD Ryzen R5 1600X |
- | RAM: 8 GB | RAM: 12 GB |
- | GPU: NVIDIA GeForce GTX 670 / NVIDIA GeForce GTX 1650 or AMD Radeon HD 7950 | GPU: NVIDIA GeForce GTX 970 / NVIDIA GeForce GTX 1660 or AMD Radeon R9 390 / AMD Radeon RX 580 |
- | HDD: 100 GB | HDD: 100 GB |
- | DirectX: Version 12 | DirectX: Version 12 |
- <h3>PS4: Download the game from the PlayStation Store</h3>
- <p>If you want to play COD Warzone on PS4, you need to download the game from the PlayStation Store. You can access the store from your PS4 console or from a web browser on your PC or mobile device. You need to have a PlayStation Network account to access the store and download the game.</p>
- <p>Once you find COD Warzone in the store, you can click on Download to start downloading the game. The game size is about 100 GB, so make sure you have enough space and a stable internet connection. You can also check the download progress and status in your Notifications menu on your PS4 console.</p>
- <h4>The storage space and online subscription required for PS4</h4>
- <p>As mentioned, you need to have at least 100 GB of free space on your PS4 console to download and install COD Warzone. You can check your available space in the Settings menu of your console. You can also delete or move some files or games to free up some space if needed.</p>
- <p>Another thing you need to play COD Warzone on PS4 is an online subscription. You need to have a PlayStation Plus membership to play online multiplayer games on PS4. This is a paid service that gives you access to online gaming, free monthly games, exclusive discounts, and more. You can buy a PlayStation Plus membership from the PlayStation Store or from a retailer. You can choose from different plans, such as monthly, quarterly, or yearly.</p>
- <h3>Xbox One: Download the game from the Microsoft Store</h3>
- <p>If you want to play COD Warzone on Xbox One, you need to download the game from the Microsoft Store. You can access the store from your Xbox One console or from a web browser on your PC or mobile device. You need to have a Microsoft account to access the store and download the game.</p>
- <p>Once you find COD Warzone in the store, you can click on Get to start downloading the game. The game size is about 100 GB, so make sure you have enough space and a stable internet connection. You can also check the download progress and status in your Queue menu on your Xbox One console.</p>
- <h4>The storage space and online subscription required for Xbox One</h4>
- <p>As mentioned, you need to have at least 100 GB of free space on your Xbox One console to download and install COD Warzone. You can check your available space in the Settings menu of your console. You can also delete or move some files or games to free up some space if needed.</p>
- <p>Another thing you need to play COD Warzone on Xbox One is an online subscription. You need to have an Xbox Live Gold membership to play online multiplayer games on Xbox One. This is a paid service that gives you access to online gaming, free monthly games, exclusive discounts, and more. You can buy an Xbox Live Gold membership from the Microsoft Store or from a retailer. You can choose from different plans, such as monthly, quarterly, or yearly.</p>
- <h2>How to play COD Warzone: Tips and tricks for beginners</h2>
- <h3>Prioritize getting your loadout and armor satchel</h3>
- <p>One of the most important things to do in COD Warzone is to get your loadout and armor satchel as soon as possible. Your loadout is a custom set of weapons, perks, and equipment that you can create in the main menu of the game. You can access your loadout in a match by buying a loadout drop at a Buy Station for $10,000 or by finding one that drops randomly on the map.</p>
- <p>Your loadout allows you to use your preferred weapons and perks that suit your playstyle and strategy. For example, you can use a sniper rifle and a ghost perk if you want to be stealthy and snipe enemies from afar, or you can use a shotgun and an overkill perk if you want to rush enemies and deal high damage up close.</p>
- <p>Your armor satchel is an item that allows you to carry up to eight armor plates instead of five. Armor plates are essential for surviving in COD Warzone, as they give you extra health and protection from enemy fire. You can find armor plates by looting crates, enemies, or Buy Stations. You can also find armor satchels by looting legendary crates, enemies, or Buy Stations.</p>
- <h3>Communicate and use the ping system with your teammates</h3>
- <p>Another important thing to do in COD Warzone is to communicate and use the ping system with your teammates. Communication is key for teamwork and coordination in any multiplayer game, especially in a battle royale game where you have to work together to survive and win. You can communicate with your teammates by using voice chat or text chat in the game.</p>
- <p>The ping system is a feature that allows you to mark locations, enemies, items, or other points of interest on the map or on your screen for your teammates to see. You can use the ping system by pressing the D-pad on your controller or the left alt key on your keyboard. You can also use different types of pings by holding down the ping button and selecting an option from the wheel menu.</p>
- <p>The ping system is very useful for sharing information and giving commands without using voice chat or text chat. For example, you can ping an enemy location to warn your teammates of danger, ping a loot crate to tell your teammates where to find items, ping a Buy Station to suggest buying something, or ping a location to tell your teammates where to go or regroup.</p>
- <h3>Keep an eye on the map and the circle movements</h3>
- <p>A third important thing to do in COD Warzone is to keep an eye on the map and the circle movements. The map is your best friend in a battle royale game, as it shows you where you are, where your teammates are, where your enemies are, where the loot is, where the contracts are, where the Buy Stations are, and more. You can access the map by pressing the touchpad on your controller or the M key on your keyboard.</p>
- <p>The circle movements are the mechanism that forces you and your enemies to move closer together as the match progresses. The circle is a safe zone that shrinks over time, and anyone who is outside of it will take damage from the gas. The circle movements are shown on the map as white and yellow lines, and you can also see a timer that tells you when the next circle will start moving.</p>
- <p>You should always be aware of where the circle is and where it is going, as well as plan your route and position accordingly. You don't want to be caught in the gas or in a bad spot when the circle closes in. You also want to avoid being in the open or in a crowded area where you can be easily spotted or ambushed by enemies.</p>
- <h3>Visit strongholds and black sites for better loot and challenges</h3>
- <p>A fourth important thing to do in COD Warzone is to visit strongholds and black sites for better loot and challenges. As we mentioned earlier, these are new features that were added in Season 6 of COD Warzone, and they offer a lot of benefits and risks for players who dare to explore them.</p>
- <p>Strongholds are buildings that are guarded by AI enemies, and they contain high-tier loot and rewards. You can find strongholds by looking for red icons on the map or on your screen. You can enter a stronghold by finding a keypad and entering a code that you can get from crates, contracts, or enemies. You can then clear each floor of the stronghold and reach the rooftop, where you can find a helicopter that will take you to the next stronghold.</p>
- <p>Black sites are hidden locations that can only be accessed by finding and activating red access cards. These cards can be found by looting crates, contracts, or enemies. You can then use a card to open a door or an elevator that will take you to a black site. Black sites contain rare loot, clues, Easter eggs, puzzles, and events.</p>
- <p>Both strongholds and black sites are great places to find better loot and challenges, but they also come with risks. You have to fight against AI enemies or other players who may enter the same location. You also have to manage your time and resources, as you may miss out on other opportunities or get caught by the circle if you spend too much time in these locations.</p>
- <h3>Play to your strengths and use cover wisely</h3>
- <p>A fifth important thing to do in COD Warzone is to play to your strengths and use cover wisely. COD Warzone is a game that rewards skill, strategy, and creativity, but it also punishes mistakes, carelessness, and recklessness. You have to know your strengths and weaknesses as a player, as well as your weapons and equipment.</p>
- <p>You should play to your strengths and use weapons and equipment that suit your playstyle and strategy. For example, if you are good at sniping, you should use a sniper rifle and a scope that allow you to hit long-range shots. If you are good at rushing, you should use a shotgun or an SMG that allow you to deal high damage up close.</p>
- <p>You should also use cover wisely and avoid exposing yourself unnecessarily. Cover is anything that can protect you from enemy fire, such as walls, buildings, rocks, trees, vehicles, etc. You should always move from cover to cover and avoid running in the open or standing still for too long. You should also use different types of cover depending on the situation. For example, if you are being sniped from afar, you should use hard cover that blocks bullets completely. If you are being rushed by enemies nearby, you should use soft cover that allows you to peek and shoot quickly.</p>
- <h2>Conclusion</h2>
- <p>COD Warzone is a fun and exciting battle royale game that offers a lot of variety, content, and challenges for players of all skill levels. Whether you want to play solo or with your friends, you can enjoy the different modes, features, and events that COD Warzone has to offer. You can also customize your loadout, explore the map, and discover secrets and surprises along the way.</p>
- <p>To play COD Warzone, you need to download the game from the appropriate store depending on your device. You also need to have enough space and a stable internet connection. You may also need to have an online subscription if you are playing on PS4 or Xbox One. You can then start playing the game and follow the tips and tricks we have shared in this guide to help you win.</p>
- <p>We hope you found this guide helpful and informative. If you have any questions or feedback, please let us know in the comments below. Thank you for reading and happy gaming!</p>
- <h2>FAQs</h2>
- <h3>Q: How much does COD Warzone cost?</h3>
- <p>A: COD Warzone is a free-to-play game that does not require any previous Call of Duty titles to play. However, you may need to pay for an online subscription if you are playing on PS4 or Xbox One.</p>
- <h3>Q: How often does COD Warzone update?</h3>
- <p>A: COD Warzone updates regularly with new seasons, events, and patches that introduce new content and changes. Each season lasts for about two months and has its own theme, story, and rewards. Each event lasts for a limited time and has its own objectives, challenges, and rewards. Each patch fixes bugs, balances gameplay, and improves performance.</p>
- <h3>Q: How many players can play COD Warzone?</h3>
- <p>A: COD Warzone supports up to 150 players in a match, depending on the mode and settings. You can play solo or in teams of two, three, or four.</p>
- <h3>Q: How do I get better at COD Warzone?</h3>
- <p>A: The best way to get better at COD Warzone is to practice and learn from your mistakes. You can also watch tutorials, guides, and streams from other players who are more experienced or skilled than you. You can also try different weapons, perks, and strategies to find what works best for you.</p>
- <h3>Q: Is COD Warzone cross-platform?</h3>
- <p>A: Yes, COD Warzone is cross-platform, meaning that you can play with your friends regardless of what device they are using. You can also enable or disable cross-play in the settings menu of the game.</p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download Ludo Nasa Now and Enjoy the Best Mobile Game of the Year.md DELETED
@@ -1,132 +0,0 @@
- <br />
- <h1>Ludo Nasa Download: A Guide to the Most Popular Game of the Year</h1>
- <p>Ludo nasa is a free-to-play mobile game application that has taken the world by storm. It is a modern version of the classic board game ludo, which is derived from the ancient Indian game of Pachisi. Ludo nasa offers a variety of features and themes that make it more fun and engaging than ever. Whether you want to play with your family and friends, or challenge players from around the world, ludo nasa has something for everyone. In this article, we will tell you everything you need to know about ludo nasa download, including its history, features, and benefits.</p>
- <h2>History of ludo game</h2>
- <p>Ludo game has a long and rich history that dates back to the 6th century CE in India. It is believed that the game was created by the Indian maharajas, who played it on a board made of cloth or slate, using seeds, shells, or dice as tokens. The original version of the game was called Chaupar, and it was also described in the Indian epic Mahabharata, where it was used as a tool for gambling and deception. The game was later modified by the Mughal emperors, such as Akbar, who played it with real people as tokens on a life-sized board. The game was also known as Pachisi, which means twenty-five in Hindi, referring to the highest score possible in the game.</p>
- <h2>ludo nasa download</h2><br /><p><b><b>Download Zip</b> &#8250; <a href="https://jinyurl.com/2uNRSm">https://jinyurl.com/2uNRSm</a></b></p><br /><br />
- <p>The game spread to other countries and regions through trade and colonization, and acquired different names and variations. For example, in Spain, it was called Parcheesi; in China, it was called Chatush pada; and in Africa, it was called Ludu. The game reached England in the 19th century, where it was patented as Ludo by Alfred Collier in 1896. Ludo means "I play" in Latin, and it became a popular board game for children and adults alike. Ludo also inspired other games, such as Uckers, which was played by the Royal Navy.</p>
- <h2>Features of ludo game</h2>
- <p>Ludo game is a simple yet strategic board game that can be played by two to four players. The objective of the game is to move four tokens of the same color from the starting point to the finishing point on the board, according to the rolls of a single die. The first player to do so wins the game. However, there are some challenges and twists along the way, such as:</p>
- <ul>
- <li>If a player rolls a six, they get another turn to roll the die.</li>
- <li>If a player lands on a square occupied by an opponent's token, they can capture that token and send it back to the starting point.</li>
- <li>If a player lands on a square occupied by their own token, they can form a block that cannot be captured by opponents.</li>
- <li>If a player reaches the square below their home column, they can move their tokens up the column to the finishing point.</li>
- </ul>
- <p>Ludo game can be played in different modes and themes, depending on the preference of the players. Some of the common modes and themes are:</p>
- <ul>
- <li>vs Computer: This mode allows players to play offline against the computer AI.</li>
- <li>Local Mode: This mode allows players to play offline with their family and friends on the same device.</li>
- <li>Online Multiplayer: This mode allows players to play online with other players from around the world.</li>
- <li>Private Multiplayer: This mode allows players to play online with their Facebook friends or other invited players in private rooms.</li>
- <li>Nature Theme: This theme continues the theme of the board game with natural elements, such as trees, flowers, and animals.</li>
- <li>Egypt Theme: This theme adds a touch of ancient history and mythology to the board game, with pyramids, sphinxes, and pharaohs.</li>
- <li>Disco Theme: This theme brings some fun and excitement to the board game, with colorful lights, music, and dance moves.</li>
- <li>NASA Theme: This theme takes the board game to outer space, with planets, stars, and rockets.</li>
- </ul>
- <p>Ludo game also has some social benefits that make it more enjoyable and rewarding for the players. Some of these benefits are:</p>
- <ul>
- <li>It improves the cognitive skills and logical thinking of the players, as they have to plan their moves and strategies.</li>
- <li>It enhances the communication and teamwork skills of the players, as they have to interact and cooperate with each other.</li>
- <li>It reduces stress and boredom, as it provides a fun and relaxing way to pass the time.</li>
- <li>It strengthens the bonds and relationships of the players, as it creates a sense of camaraderie and competition.</li>
- </ul>
- <h2>Ludo nasa download</h2>
- <p>Ludo nasa is one of the most popular and downloaded versions of ludo game in the market. It has over 100 million downloads on Google Play Store and over 10 million downloads on App Store. It is compatible with Android and iOS devices, as well as Windows PC and Mac. To download and play ludo nasa on your device, you can follow these simple steps:</p>
- <h3>For Android devices</h3>
- <ol>
- <li>Go to Google Play Store and search for ludo nasa.</li>
- <li>Select the app from the list and tap on Install.</li>
- <li>Wait for the app to download and install on your device.</li>
- <li>Open the app and enjoy playing ludo nasa with your friends or online players.</li>
- </ol>
- <h3>For iOS devices</h3>
- <ol>
- <li>Go to App Store and search for ludo nasa.</li>
- <li>Select the app from the list and tap on Get.</li>
- <li>Enter your Apple ID password or use Touch ID or Face ID to confirm.</li>
- <li>Wait for the app to download and install on your device.</li>
- <li>Open the app and enjoy playing ludo nasa with your friends or online players.</li>
- </ol>
- <h3>For Windows PC or Mac</h3>
- <ol>
- <li>Go to https://ludonasa.com/ and click on Download for PC or Download for Mac.</li>
- <li>Select the version that matches your operating system and click on Download Now.</li>
- <li>Wait for the file to download on your computer.</li>
- <li>Open the file and follow the instructions to install ludo nasa on your computer.</li>
- <li>Launch ludo nasa from your desktop or start menu and enjoy playing ludo nasa with your friends or online players.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Ludo nasa is a fun and exciting game that you can play anytime, anywhere, with anyone. It is based on the classic board game ludo, which has a long and rich history in India and other countries. Ludo nasa offers a variety of features and themes that make it more appealing and engaging than ever. It also has some social benefits that improve your cognitive, communication, and emotional skills. If you are looking for a game that can entertain you, challenge you, and connect you with others, then you should definitely try ludo nasa download. Here are some tips and tricks that can help you win more games:</p>
- <p>ludo nasa game download<br />
- ludo nasa app download<br />
- ludo nasa apk download<br />
- ludo nasa online game download<br />
- ludo nasa play and win money download<br />
- ludo nasa free download<br />
- ludo nasa download for pc<br />
- ludo nasa download for android<br />
- ludo nasa download for ios<br />
- ludo nasa download link<br />
- ludo nasa latest version download<br />
- ludo nasa mod apk download<br />
- ludo nasa hack apk download<br />
- ludo nasa unlimited money download<br />
- ludo nasa real money game download<br />
- ludo nasa best ludo game download<br />
- ludo nasa india gaming awards 2023 winner download<br />
- ludo nasa quick mode game download<br />
- ludo nasa entertainment game download<br />
- ludo nasa board game download<br />
- how to download ludo nasa game<br />
- where to download ludo nasa game<br />
- why to download ludo nasa game<br />
- what is ludo nasa game download<br />
- when to download ludo nasa game<br />
- ludonasa.com game download<br />
- ludonasa.com app download<br />
- ludonasa.com apk download<br />
- ludonasa.com online game download<br />
- ludonasa.com play and win money download<br />
- ludonasa.com free download<br />
- ludonasa.com download for pc<br />
- ludonasa.com download for android<br />
- ludonasa.com download for ios<br />
- ludonasa.com download link<br />
- ludonasa.com latest version download<br />
- ludonasa.com mod apk download<br />
- ludonasa.com hack apk download<br />
- ludonasa.com unlimited money download<br />
- ludonasa.com real money game download<br />
- ludonasa.com best ludo game download<br />
- ludonasa.com india gaming awards 2023 winner download<br />
- ludonasa.com quick mode game download<br />
- ludonasa.com entertainment game download<br />
- ludonasa.com board game download<br />
- how to play ludo nasa after downloading it <br />
- how to win money on ludo nasa after downloading it <br />
- how to invite friends on ludo nasa after downloading it <br />
- how to update ludo nasa after downloading it <br />
- how to contact support on ludo nasa after downloading it </p>
- <ul>
- <li>Always try to roll a six at the beginning of the game, so that you can move your tokens out of the starting point faster.</li>
- <li>Avoid landing on squares that are occupied by your opponents' tokens, as they can capture them and send them back to the starting point.</li>
- <li>Use blocks to protect your tokens from being captured by your opponents. You can form a block by landing two or more tokens of the same color on the same square.</li>
- <li>Be careful when moving your tokens up the home column, as they can only move according to the exact number rolled on the die. If you roll a higher number than needed, you will have to skip your turn.</li>
- <li>Use different themes to spice up your game experience. Each theme has its own music, sound effects, graphics, and animations that can make your game more enjoyable.</li>
- </ul>
- <p>We hope that this article has given you some useful information about ludo nasa download. If you have any questions or feedback about ludo nasa, feel free to share them with us in the comments section below. Thank you for reading our article. We hope that you have learned something new and interesting about ludo nasa download. Before we end, we would like to answer some of the frequently asked questions that you might have about ludo nasa. Here are the top five FAQs that we have selected for you:</p>
- <h3>FAQs</h3>
- <ol>
- <li>What is the difference between ludo nasa and ludo king?</li>
- <p>Ludo nasa and ludo king are both popular versions of ludo game, but they have some differences in terms of features and themes. Ludo nasa has more themes than ludo king, such as nature, Egypt, disco, and NASA. Ludo nasa also has more modes than ludo king, such as vs computer, local mode, online multiplayer, and private multiplayer. Ludo nasa also has a better user interface and graphics than ludo king.</p>
- <li>How can I play ludo nasa with voice chat?</li>
- <p>Ludo nasa has a voice chat feature that allows you to communicate with your friends or online players while playing the game. To use this feature, you need to enable the microphone permission on your device and join a private room with your friends or online players. Then, you can tap on the microphone icon on the top right corner of the screen to start or stop the voice chat.</p>
- <li>How can I earn coins and gems in ludo nasa?</li>
- <p>Coins and gems are the in-game currencies that you can use to buy different themes and items in ludo nasa. You can earn coins and gems by playing and winning games, completing daily tasks, watching ads, spinning the wheel, or inviting your friends to play the game. You can also buy coins and gems with real money if you want to.</p>
- <li>How can I update ludo nasa to the latest version?</li>
- <p>Ludo nasa is constantly updated with new features and improvements to enhance your gaming experience. To update ludo nasa to the latest version, you need to go to Google Play Store or App Store and check if there is any update available for the app. If there is, you can tap on Update and wait for the app to download and install on your device.</p>
- <li>How can I contact the customer support of ludo nasa?</li>
- <p>If you have any issues or queries about ludo nasa, you can contact the customer support of ludo nasa by sending an email to [email protected] or by filling out the feedback form on their website https://ludonasa.com/. They will try to respond to your message as soon as possible.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download QS Ar-Rahman - The Surah that Will Make You Cry.md DELETED
@@ -1,174 +0,0 @@
-
- <h1>Download QS Ar Rahman: How to Listen to the Beautiful Surah Online</h1>
- <p>QS Ar Rahman is one of the most beautiful and powerful surahs in the Quran. It is also known as "The Beneficent" or "The Most Merciful" because it begins with the name of Allah, the Most Compassionate. In this article, we will explore what QS Ar Rahman is, why it is important, and how you can download it in different formats and languages. We will also share some tips on how to benefit from listening to or reading this surah.</p>
- <h2>What is QS Ar Rahman and Why is it Important?</h2>
- <p>QS Ar Rahman is the 55th surah in the Quran, consisting of 78 verses. It was revealed in Medina, after the migration of the Prophet Muhammad (peace be upon him) and his companions from Mecca. It is one of the surahs that begins with one of the names of Allah, which is a rare feature in the Quran. It is also one of the surahs that has a refrain or chorus, which is repeated 31 times throughout the surah: "Maka, nikmat Tuhanmu manakah yang kamu dustakan (wahai jin dan manusia)?" This means "Then which of the favors of your Lord will you deny (O jinn and mankind)?"</p>
- <h2>download qs ar rahman</h2><br /><p><b><b>Download Zip</b> &#8250; <a href="https://jinyurl.com/2uNIib">https://jinyurl.com/2uNIib</a></b></p><br /><br />
- <p>QS Ar Rahman is important because it reminds us of the countless blessings and favors that Allah has bestowed upon us, both in this world and the hereafter. It also invites us to reflect on the signs of Allah's power and wisdom in His creation, such as the sun, the moon, the stars, the plants, the animals, the seas, and the human beings. It also warns us of the consequences of denying or rejecting Allah's favors, such as the punishment of hellfire or the deprivation of paradise. It also encourages us to be grateful, humble, and obedient to Allah, who is the Most Merciful and the Most Generous.</p>
- <h3>The Meaning and Benefits of QS Ar Rahman</h3>
- <p>The meaning of QS Ar Rahman is derived from its first verse, which states: "Ar-Rahman (The Most Compassionate)". This is one of the names of Allah, which describes His attribute of being infinitely kind, loving, caring, and forgiving to His creation. He is also Ar-Raheem (The Most Merciful), which means He bestows His mercy upon those who believe in Him and do good deeds. He is also Al-Wadud (The Most Loving), which means He loves those who love Him and follow His guidance.</p>
- <p>The benefits of QS Ar Rahman are many, as it contains verses that praise Allah's greatness, glorify His majesty, describe His favors, warn against His wrath, promise His reward, and invite to His worship. Some of the benefits are:</p>
- <ul>
- <li>It increases one's faith and gratitude towards Allah.</li>
- <li>It protects one from evil influences and temptations.</li>
- <li>It brings peace and tranquility to one's heart and mind.</li>
- <li>It purifies one's soul and cleanses one</li>
- <li>It increases one's knowledge and understanding of the Quran.</li>
- <li>It enhances one's love and admiration for Allah and His creation.</li>
- </ul>
- <h3>The Occasion and Context of Revelation of QS Ar Rahman</h3>
- <p>The occasion and context of revelation of QS Ar Rahman are related to the events that took place in Medina, after the migration of the Prophet Muhammad (peace be upon him) and his companions from Mecca. The surah was revealed to address the challenges and opportunities that the Muslim community faced in their new environment, such as:</p>
- <ul>
- <li>The interaction and coexistence with the Jews, Christians, and polytheists of Medina.</li>
- <li>The establishment and consolidation of the Islamic state and society.</li>
- <li>The expansion and propagation of Islam to other regions and peoples.</li>
- <li>The defense and security of the Muslim community from external threats and enemies.</li>
- </ul>
- <p>The surah was also revealed to highlight the contrast between the mercy and justice of Allah, and the ingratitude and rebellion of some of His creation, especially the jinn and mankind. The surah was also revealed to show the beauty and harmony of Allah's creation, and the signs and proofs of His oneness and lordship.</p>
- <h2>How to Download QS Ar Rahman in Different Formats and Languages</h2>
- <p>If you want to download QS Ar Rahman in different formats and languages, you have many options available online. You can choose from various websites and apps that offer Quran recitations, translations, tafsirs, and other resources. Here are some of the best sources that you can use:</p>
- <p>Download qs ar rahman full mp3<br />
- Download qs ar rahman 1-78 ayat<br />
- Download qs ar rahman latin dan terjemahan<br />
- Download qs ar rahman muzammil hasballah<br />
- Download qs ar rahman mishary rashid alafasy<br />
- Download qs ar rahman muhammad taha al junayd<br />
- Download qs ar rahman maghfirah m hussein<br />
- Download qs ar rahman hanan attaki<br />
- Download qs ar rahman abdul basit abdus samad<br />
- Download qs ar rahman saad al ghamdi<br />
- Download qs ar rahman hani ar rifai<br />
- Download qs ar rahman syekh sudais<br />
- Download qs ar rahman nasser al qatami<br />
- Download qs ar rahman ahmad saud<br />
- Download qs ar rahman yusuf mansur<br />
- Download qs ar rahman muhammad thaha dewasa<br />
- Download qs ar rahman fatih seferagic<br />
- Download qs ar rahman wafiq azizah<br />
- Download qs ar rahman yusuf kalo<br />
- Download qs ar rahman imam masjidil haram<br />
- Download qs ar rahman muammar za<br />
- Download qs ar rahman muhammad toha al junaid dewasa<br />
- Download qs ar rahman salim bahanan<br />
- Download qs ar rahman idris abkar<br />
- Download qs ar rahman maher al muaiqly<br />
- Download qs ar rahman ahmad al ajmi<br />
- Download qs ar rahman abdurrahman as sudais<br />
- Download qs ar rahman syaikh ali jaber<br />
- Download qs ar rahman syekh ali hudaify<br />
- Download qs ar rahman syekh shuraim<br />
- Download qs ar rahman syekh mahmud khalil al husary<br />
- Download qs ar rahman syekh abdullah awad al juhani<br />
- Download qs ar rahman syekh abdullah basfar<br />
- Download qs ar rahman syekh abdul aziz al ahmad<br />
- Download qs ar rahman syekh abdul muhsin al qasim<br />
- Download qs ar rahman syekh abdul wadud haneef<br />
- Download qs ar rahman syekh abu bakr ash shatri<br />
- Download qs ar rahman syekh adil al kalbani<br />
- Download qs ar rahman syekh ahmad bin ali al ajmy<br />
- Download qs ar rahman syekh akram al alaqimy<br />
- Download qs ar rahman syekh ali abdurrahman al hudzaify<br />
- Download qs ar rahman syekh bandar baleela<br />
- Download qs ar rahman syekh fawaz al kaabi<br />
- Download qs ar rahman syekh faysal noman<br />
- Download qs ar rahman syekh ibrahim al akhdar<br />
- Download qs ar rahman syekh ibrahim al jibreen<br />
- Download qs ar rahman syekh ibrahim as sudaisi an nabawy</p>
- <h3>Download QS Ar Rahman in MP3 and Audio Formats</h3>
- <p>If you want to download QS Ar Rahman in MP3 and audio formats, you can use the following websites:</p>
- <h4>Quran.com: The Best Source for High Quality Quran Recitations</h4>
- <p>Quran.com is one of the most popular and reliable websites for Quran recitations. It offers high quality audio files by various reciters from different countries and styles. You can listen to or download Surah Ar Rahman by any reciter of your choice, such as Abdul Basit, Mishary Rashid, Saad Al Ghamdi, Maher Al Mueaqly, etc. You can also choose from different translations in English, Urdu, Indonesian, French, Spanish, etc. You can also read the Arabic text along with the audio, or view the word by word translation and transliteration. You can access Quran.com from any device, such as your computer, smartphone, or tablet.</p>
- <p>To download Surah Ar Rahman from Quran.com, you can follow these steps:</p>
- <ol>
- <li>Go to [Quran.com] and search for Surah Ar Rahman in the search bar.</li>
- <li>Select the reciter and translation of your choice from the drop-down menus.</li>
- <li>Click on the play button to listen to the surah online, or click on the download button to save it on your device.</li>
- <li>You can also click on the settings icon to adjust the speed, repeat mode, night mode, etc.</li>
- </ol>
- <h4>QuranicAudio.com: Stream or Download Quran Audio by Various Reciters</h4>
- <p>QuranicAudio.com is another great website for Quran audio. It offers a large collection of Quran recitations by various reciters from different countries and styles. You can stream or download Surah Ar Rahman by any reciter of your choice, such as Abdullah Basfar, Abdur Rahman As Sudais, Abu Bakr Al Shatri, Ahmed Al Ajmi, etc. You can also choose from different translations in English, Urdu, Indonesian, French, Spanish, etc. You can also read the Arabic text along with the audio.</p>
- <p>To download Surah Ar Rahman from QuranicAudio.com, you can follow these steps:</p>
- <ol>
- <li>Go to [QuranicAudio.com] and search for Surah Ar Rahman in the search bar.</li>
- <li>Select the reciter and translation of your choice from the drop-down menus.</li>
- <li>Click on the play button to listen to the surah online, or right-click on the download button and select "Save link as" to save it on your device.</li>
- <li>You can also click on the settings icon to adjust the speed, repeat mode, night mode, etc.</li>
- </ol>
- <h4>QuranCentral.com: Listen to Surah Ar Rahman by Different Qaris and Translations</h4>
- <p>QuranCentral.com is another excellent website for Quran audio. It offers a wide range of Quran recitations by different qaris (reciters) from different countries and styles. You can listen to or download Surah Ar Rahman by any qari of your choice, such as Abdul Rahman Al Sudais, Muhammad Siddiq Al Minshawi, Muhammad Jibreel, Nasser Al Qatami, etc. You can also choose from different translations in English, Urdu, Indonesian, French, Spanish, etc. You can also read the Arabic text along with the audio, or view the word by word translation and transliteration. You can access QuranCentral.com from any device, such as your computer, smartphone, or tablet.</p>
- <p>To download Surah Ar Rahman from QuranCentral.com, you can follow these steps:</p>
- <ol>
- <li>Go to [QuranCentral.com] and search for Surah Ar Rahman in the search bar.</li>
- <li>Select the qari and translation of your choice from the drop-down menus.</li>
- <li>Click on the play button to listen to the surah online, or click on the download button to save it on your device.</li>
- <li>You can also click on the settings icon to adjust the speed, repeat mode, night mode, etc.</li>
- </ol>
- <h3>Download QS Ar Rahman in PDF and Text Formats</h3>
- <p>If you want to download QS Ar Rahman in PDF and text formats, you can use the following websites:</p>
- <h4>LiteQuran.net: Read Surah Ar Rahman in Arabic, Latin, and Indonesian</h4>
- <p>LiteQuran.net is a simple and easy-to-use website for reading Quran online. It offers Surah Ar Rahman in Arabic, Latin (transliteration), and Indonesian (translation). You can also listen to the audio recitation by various reciters. You can also view the tajweed rules and color codes for each verse. You can access LiteQuran.net from any device, such as your computer, smartphone, or tablet.</p>
109
- <p>To download Surah Ar Rahman from LiteQuran.net, you can follow these steps:</p>
110
- <ol>
111
- <li>Go to [LiteQuran.net] and search for Surah Ar Rahman in the search bar.</li>
112
- <li>Select the language and reciter of your choice from the drop-down menus.</li>
113
- <li>Click on the play button to listen to the surah online, or click on the PDF icon to download it on your device.</li>
114
- <li>You can also click on the settings icon to adjust the font size, color theme, night mode, etc.</li>
115
- </ol>
116
- <h4>QuranBest.com: Read Surah Ar Rahman in Arabic and English with Tafsir</h4>
117
- <p>QuranBest.com is a comprehensive and interactive website for reading Quran online. It offers Surah Ar Rahman in Arabic and English (translation) with tafsir (explanation) by various scholars and sources. You can also listen to the audio recitation by various reciters. You can also view the word by word translation and transliteration for each verse. You can also access other features such as bookmarks, notes, highlights, etc. You can access QuranBest.com from any device, such as your computer, smartphone, or tablet.</p>
118
- <p>To download Surah Ar Rahman from QuranBest.com, you can follow these steps:</p>
119
- <ol>
120
- <li>Go to [QuranBest.com] and search for Surah Ar Rahman in the search bar.</li>
121
- <li>Select the language, reciter, and tafsir of your choice from the drop-down menus.</li>
122
- <li>Click on the play button to listen to the surah online, or click on the PDF icon to download it on your device.</li>
123
- <li>You can also click on the settings icon to adjust the font size, color theme, night mode, etc.</li>
124
- </ol>
125
- <h4>TafsirWeb.com: Read Surah Ar Rahman in Arabic and Indonesian with Tafsir</h4>
126
- <p>TafsirWeb.com is a dedicated website for reading Quran tafsir online. It offers Surah Ar Rahman in Arabic and Indonesian (translation) with tafsir (explanation) by various scholars and sources. You can also listen to the audio recitation by various reciters. You can also view the word by word translation and transliteration for each verse. You can also access other features such as bookmarks, notes, highlights, etc. You can access TafsirWeb.com from any device, such as your computer, smartphone, or tablet.</p>
127
- <p>To download Surah Ar Rahman from TafsirWeb.com, you can follow these steps:</p>
128
- <ol>
129
- <li>Go to [TafsirWeb.com] and search for Surah Ar Rahman in the search bar.</li>
130
- <li>Select the language, reciter, and tafsir of your choice from the drop-down menus.</li>
131
- <li>Click on the play button to listen to the surah online, or click on the PDF icon to download it on your device.</li>
132
- <li>You can also click on the settings icon to adjust the font size, color theme, night mode, etc.</li>
133
- </ol>
134
- <h2>How to Benefit from Listening to or Reading QS Ar Rahman</h2>
135
- <p>Listening to or reading QS Ar Rahman is not enough to benefit from its blessings and lessons. We also need to understand its meaning, reflect on its message, and apply its teachings in our daily life. Here are some tips on how to do that:</p>
136
- <h3>Tips for Reciting or Listening to QS Ar Rahman with Focus and Reflection</h3>
137
- <p>Reciting or listening to QS Ar Rahman with focus and reflection means paying attention to the words and their meanings, and thinking about their implications and relevance for us. Here are some tips on how to do that:</p>
138
- <ul>
139
- <li>Choose a suitable time and place where you can recite or listen to QS Ar Rahman without distractions or interruptions.</li>
140
- <li>Prepare yourself mentally and spiritually by making wudu (ablution), seeking refuge from Satan, and asking Allah for guidance and understanding.</li>
141
- <li>Recite or listen to QS Ar Rahman with a clear and melodious voice, following the rules of tajweed (proper pronunciation) and tartil (moderate speed).</li>
142
- <li>Pause at the end of each verse or section, and repeat the refrain "Maka, nikmat Tuhanmu manakah yang kamu dustakan (wahai jin dan manusia)?" This means "Then which of the favors of your Lord will you deny (O jinn and mankind)?" Try to answer this question in your mind or heart, and acknowledge Allah's favors upon you.</li>
143
- <li>Contemplate on the signs of Allah's power and wisdom in His creation, such as the sun, the moon, the stars, the plants, the animals, the seas, and the human beings. Think about how they reflect Allah's mercy and generosity towards us.</li>
144
- <li>Reflect on the consequences of denying or rejecting Allah's favors, such as the punishment of hellfire or the deprivation of paradise. Think about how you can avoid them by being grateful, humble, and obedient to Allah.</li>
145
- <li>Remember the promises of Allah's reward for those who believe in Him and do good deeds, such as the gardens of paradise or the companionship of the righteous. Think about how you can attain them by following Allah's guidance and commands.</li>
146
- </ul>
147
- <h3>Tips for Applying the Lessons of QS Ar Rahman in Daily Life</h3>
148
- <p>Applying the lessons of QS Ar Rahman in daily life means living according to its teachings and values, and implementing its wisdom and advice in our actions and interactions. Here are some tips on how to do that:</p>
149
- <ul>
150
- <li>Be grateful for Allah's favors and blessings upon you, and express your gratitude by praising Him, thanking Him, and worshipping Him.</li>
151
- <li>Be humble before Allah and His creation, and avoid arrogance, pride, and self-conceit. Recognize your limitations and weaknesses, and seek Allah's help and forgiveness.</li>
152
- <li>Be obedient to Allah and His messenger (peace be upon him), and follow their commands and prohibitions. Avoid sins, innovations, and deviations from the straight path.</li>
153
- <li>Be generous with Allah's favors and blessings upon you, and share them with others. Give charity, help the needy, support the cause of Islam, and spread goodness.</li>
154
- <li>Be respectful of Allah's creation, and treat them with kindness, justice, and compassion. Do not harm them, abuse them, or waste them. Appreciate their diversity and beauty.</li>
155
- <li>Be hopeful of Allah's mercy and forgiveness, and do not despair or give up. Repent from your sins, seek His pardon, and trust in His plan.</li>
156
- </ul>
157
- <h1>Conclusion</h1>
158
- <p>QS Ar Rahman is a beautiful and powerful surah that reminds us of Allah's mercy and favors, and invites us to reflect on His signs and proofs. It also warns us of the consequences of denying or rejecting His favors, and encourages us to be grateful, humble, and obedient to Him. We can benefit from this surah by downloading it in different formats and languages, and by reciting or listening to it with focus and reflection. We can also apply its lessons in our daily life by living according to its teachings and values. We ask Allah to make us among those who recite, listen, understand, and act upon QS Ar Rahman. Ameen.</p>
159
- <h1>FAQs</h1>
160
- <p>Here are some frequently asked questions about QS Ar Rahman:</p>
161
- <ol>
162
- <li>What is the main theme of QS Ar Rahman?</li>
163
- <p>The main theme of QS Ar Rahman is the mercy and favors of Allah, and the response of His creation to them.</p>
164
- <li>How many times is the refrain "Then which of the favors of your Lord will you deny (O jinn and mankind)?" repeated in QS Ar Rahman?</li>
165
- <p>The refrain is repeated 31 times throughout the surah.</p>
166
- <li>What are some of the favors of Allah that are mentioned in QS Ar Rahman?</li>
167
- <p>Some of the favors of Allah that are mentioned in QS Ar Rahman are: the Quran, the creation of man and jinn, the sun and the moon, the stars and the trees, the sky and the earth, the seas and the rivers, the fruits and the grains, the pearls and the corals, the gardens and the springs, etc.</p>
168
- <li>What are some of the consequences of denying or rejecting Allah's favors that are mentioned in QS Ar Rahman?</li>
169
- <p>Some of the consequences of denying or rejecting Allah's favors that are mentioned in QS Ar Rahman are: the punishment of hellfire, the scorching wind and boiling water, the chains and iron collars, etc.</p>
170
- <li>What are some of the rewards for those who believe in Allah and do good deeds that are mentioned in QS Ar Rahman?</li>
171
- <p>Some of the rewards for those who believe in Allah and do good deeds that are mentioned in QS Ar Rahman are: the gardens of paradise, the companionship of pure spouses, the honor and dignity from Allah, etc.</p>
172
- </ol>
spaces/2ndelement/voicevox/voicevox_engine/dev/synthesis_engine/__init__.py DELETED
@@ -1,3 +0,0 @@
1
- from .mock import MockSynthesisEngine
2
-
3
- __all__ = ["MockSynthesisEngine"]
spaces/4Taps/SadTalker/src/utils/preprocess.py DELETED
@@ -1,152 +0,0 @@
1
- import numpy as np
2
- import cv2, os, sys, torch
3
- from tqdm import tqdm
4
- from PIL import Image
5
-
6
- # 3dmm extraction
7
- from src.face3d.util.preprocess import align_img
8
- from src.face3d.util.load_mats import load_lm3d
9
- from src.face3d.models import networks
10
- from src.face3d.extract_kp_videos import KeypointExtractor
11
-
12
- from scipy.io import loadmat, savemat
13
- from src.utils.croper import Croper
14
-
15
- import warnings
16
- warnings.filterwarnings("ignore")
17
-
18
- def split_coeff(coeffs):
19
- """
20
- Return:
21
- coeffs_dict -- a dict of torch.tensors
22
-
23
- Parameters:
24
- coeffs -- torch.tensor, size (B, 256)
25
- """
26
- id_coeffs = coeffs[:, :80]
27
- exp_coeffs = coeffs[:, 80: 144]
28
- tex_coeffs = coeffs[:, 144: 224]
29
- angles = coeffs[:, 224: 227]
30
- gammas = coeffs[:, 227: 254]
31
- translations = coeffs[:, 254:]
32
- return {
33
- 'id': id_coeffs,
34
- 'exp': exp_coeffs,
35
- 'tex': tex_coeffs,
36
- 'angle': angles,
37
- 'gamma': gammas,
38
- 'trans': translations
39
- }
40
-
41
-
42
- class CropAndExtract():
43
- def __init__(self, path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device):
44
-
45
- self.croper = Croper(path_of_lm_croper)
46
- self.kp_extractor = KeypointExtractor(device)
47
- self.net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='').to(device)
48
- checkpoint = torch.load(path_of_net_recon_model, map_location=torch.device(device))
49
- self.net_recon.load_state_dict(checkpoint['net_recon'])
50
- self.net_recon.eval()
51
- self.lm3d_std = load_lm3d(dir_of_BFM_fitting)
52
- self.device = device
53
-
54
- def generate(self, input_path, save_dir, crop_or_resize='crop'):
55
-
56
- pic_size = 256
57
- pic_name = os.path.splitext(os.path.split(input_path)[-1])[0]
58
-
59
- landmarks_path = os.path.join(save_dir, pic_name+'_landmarks.txt')
60
- coeff_path = os.path.join(save_dir, pic_name+'.mat')
61
- png_path = os.path.join(save_dir, pic_name+'.png')
62
-
63
- #load input
64
- if not os.path.isfile(input_path):
65
- raise ValueError('input_path must be a valid path to video/image file')
66
- elif input_path.split('.')[1] in ['jpg', 'png', 'jpeg']:
67
- # loader for first frame
68
- full_frames = [cv2.imread(input_path)]
69
- fps = 25
70
- else:
71
- # loader for videos
72
- video_stream = cv2.VideoCapture(input_path)
73
- fps = video_stream.get(cv2.CAP_PROP_FPS)
74
- full_frames = []
75
- while 1:
76
- still_reading, frame = video_stream.read()
77
- if not still_reading:
78
- video_stream.release()
79
- break
80
- full_frames.append(frame)
81
- break
82
- x_full_frames = [cv2.cvtColor(full_frames[0], cv2.COLOR_BGR2RGB) ]
83
-
84
- if crop_or_resize.lower() == 'crop': # default crop
85
- x_full_frames, crop, quad = self.croper.crop(x_full_frames, xsize=pic_size)
86
- clx, cly, crx, cry = crop
87
- lx, ly, rx, ry = quad
88
- lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)
89
- oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx
90
- original_size = (ox2 - ox1, oy2 - oy1)
91
- else:
92
- oy1, oy2, ox1, ox2 = 0, x_full_frames[0].shape[0], 0, x_full_frames[0].shape[1]
93
- original_size = (ox2 - ox1, oy2 - oy1)
94
-
95
- frames_pil = [Image.fromarray(cv2.resize(frame,(pic_size, pic_size))) for frame in x_full_frames]
96
- if len(frames_pil) == 0:
97
- print('No face is detected in the input file')
98
- return None, None
99
-
100
- # save crop info
101
- for frame in frames_pil:
102
- cv2.imwrite(png_path, cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR))
103
-
104
- # 2. get the landmark according to the detected face.
105
- if not os.path.isfile(landmarks_path):
106
- lm = self.kp_extractor.extract_keypoint(frames_pil, landmarks_path)
107
- else:
108
- print(' Using saved landmarks.')
109
- lm = np.loadtxt(landmarks_path).astype(np.float32)
110
- lm = lm.reshape([len(x_full_frames), -1, 2])
111
-
112
- if not os.path.isfile(coeff_path):
113
- # load 3dmm paramter generator from Deep3DFaceRecon_pytorch
114
- video_coeffs, full_coeffs = [], []
115
- for idx in tqdm(range(len(frames_pil)), desc='3DMM Extraction In Video:'):
116
- frame = frames_pil[idx]
117
- W,H = frame.size
118
- lm1 = lm[idx].reshape([-1, 2])
119
-
120
- if np.mean(lm1) == -1:
121
- lm1 = (self.lm3d_std[:, :2]+1)/2.
122
- lm1 = np.concatenate(
123
- [lm1[:, :1]*W, lm1[:, 1:2]*H], 1
124
- )
125
- else:
126
- lm1[:, -1] = H - 1 - lm1[:, -1]
127
-
128
- trans_params, im1, lm1, _ = align_img(frame, lm1, self.lm3d_std)
129
-
130
- trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]).astype(np.float32)
131
- im_t = torch.tensor(np.array(im1)/255., dtype=torch.float32).permute(2, 0, 1).to(self.device).unsqueeze(0)
132
-
133
- with torch.no_grad():
134
- full_coeff = self.net_recon(im_t)
135
- coeffs = split_coeff(full_coeff)
136
-
137
- pred_coeff = {key:coeffs[key].cpu().numpy() for key in coeffs}
138
-
139
- pred_coeff = np.concatenate([
140
- pred_coeff['exp'],
141
- pred_coeff['angle'],
142
- pred_coeff['trans'],
143
- trans_params[2:][None],
144
- ], 1)
145
- video_coeffs.append(pred_coeff)
146
- full_coeffs.append(full_coeff.cpu().numpy())
147
-
148
- semantic_npy = np.array(video_coeffs)[:,0]
149
-
150
- savemat(coeff_path, {'coeff_3dmm': semantic_npy, 'full_3dmm': np.array(full_coeffs)[0]})
151
-
152
- return coeff_path, png_path, original_size
spaces/801artistry/RVC801/rvc_for_realtime.py DELETED
@@ -1,297 +0,0 @@
1
- import faiss, torch, traceback, parselmouth, numpy as np, torchcrepe, torch.nn as nn, pyworld
2
- from fairseq import checkpoint_utils
3
- from lib.infer_pack.models import (
4
- SynthesizerTrnMs256NSFsid,
5
- SynthesizerTrnMs256NSFsid_nono,
6
- SynthesizerTrnMs768NSFsid,
7
- SynthesizerTrnMs768NSFsid_nono,
8
- )
9
- import os, sys
10
- from time import time as ttime
11
- import torch.nn.functional as F
12
- import scipy.signal as signal
13
-
14
- now_dir = os.getcwd()
15
- sys.path.append(now_dir)
16
- from configs.config import Config
17
- from multiprocessing import Manager as M
18
-
19
- mm = M()
20
- config = Config()
21
-
22
-
23
- class RVC:
24
- def __init__(
25
- self, key, pth_path, index_path, index_rate, n_cpu, inp_q, opt_q, device
26
- ) -> None:
27
- """
28
- Initialization
29
- """
30
- try:
31
- global config
32
- self.inp_q = inp_q
33
- self.opt_q = opt_q
34
- self.device = device
35
- self.f0_up_key = key
36
- self.time_step = 160 / 16000 * 1000
37
- self.f0_min = 50
38
- self.f0_max = 1100
39
- self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
40
- self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
41
- self.sr = 16000
42
- self.window = 160
43
- self.n_cpu = n_cpu
44
- if index_rate != 0:
45
- self.index = faiss.read_index(index_path)
46
- self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
47
- print("index search enabled")
48
- self.index_rate = index_rate
49
- models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
50
- ["hubert_base.pt"],
51
- suffix="",
52
- )
53
- hubert_model = models[0]
54
- hubert_model = hubert_model.to(config.device)
55
- if config.is_half:
56
- hubert_model = hubert_model.half()
57
- else:
58
- hubert_model = hubert_model.float()
59
- hubert_model.eval()
60
- self.model = hubert_model
61
- cpt = torch.load(pth_path, map_location="cpu")
62
- self.tgt_sr = cpt["config"][-1]
63
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
64
- self.if_f0 = cpt.get("f0", 1)
65
- self.version = cpt.get("version", "v1")
66
- if self.version == "v1":
67
- if self.if_f0 == 1:
68
- self.net_g = SynthesizerTrnMs256NSFsid(
69
- *cpt["config"], is_half=config.is_half
70
- )
71
- else:
72
- self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
73
- elif self.version == "v2":
74
- if self.if_f0 == 1:
75
- self.net_g = SynthesizerTrnMs768NSFsid(
76
- *cpt["config"], is_half=config.is_half
77
- )
78
- else:
79
- self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
80
- del self.net_g.enc_q
81
- print(self.net_g.load_state_dict(cpt["weight"], strict=False))
82
- self.net_g.eval().to(device)
83
- if config.is_half:
84
- self.net_g = self.net_g.half()
85
- else:
86
- self.net_g = self.net_g.float()
87
- self.is_half = config.is_half
88
- except:
89
- print(traceback.format_exc())
90
-
91
- def get_f0_post(self, f0):
92
- f0_min = self.f0_min
93
- f0_max = self.f0_max
94
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
95
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
96
- f0bak = f0.copy()
97
- f0_mel = 1127 * np.log(1 + f0 / 700)
98
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
99
- f0_mel_max - f0_mel_min
100
- ) + 1
101
- f0_mel[f0_mel <= 1] = 1
102
- f0_mel[f0_mel > 255] = 255
103
- f0_coarse = np.rint(f0_mel).astype(np.int_)
104
- return f0_coarse, f0bak
105
-
106
- def get_f0(self, x, f0_up_key, n_cpu, method="harvest"):
107
- n_cpu = int(n_cpu)
108
- if method == "crepe":
109
- return self.get_f0_crepe(x, f0_up_key)
110
- if method == "rmvpe":
111
- return self.get_f0_rmvpe(x, f0_up_key)
112
- if method == "pm":
113
- p_len = x.shape[0] // 160
114
- f0 = (
115
- parselmouth.Sound(x, 16000)
116
- .to_pitch_ac(
117
- time_step=0.01,
118
- voicing_threshold=0.6,
119
- pitch_floor=50,
120
- pitch_ceiling=1100,
121
- )
122
- .selected_array["frequency"]
123
- )
124
-
125
- pad_size = (p_len - len(f0) + 1) // 2
126
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
127
- print(pad_size, p_len - len(f0) - pad_size)
128
- f0 = np.pad(
129
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
130
- )
131
-
132
- f0 *= pow(2, f0_up_key / 12)
133
- return self.get_f0_post(f0)
134
- if n_cpu == 1:
135
- f0, t = pyworld.harvest(
136
- x.astype(np.double),
137
- fs=16000,
138
- f0_ceil=1100,
139
- f0_floor=50,
140
- frame_period=10,
141
- )
142
- f0 = signal.medfilt(f0, 3)
143
- f0 *= pow(2, f0_up_key / 12)
144
- return self.get_f0_post(f0)
145
- f0bak = np.zeros(x.shape[0] // 160, dtype=np.float64)
146
- length = len(x)
147
- part_length = int(length / n_cpu / 160) * 160
148
- ts = ttime()
149
- res_f0 = mm.dict()
150
- for idx in range(n_cpu):
151
- tail = part_length * (idx + 1) + 320
152
- if idx == 0:
153
- self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts))
154
- else:
155
- self.inp_q.put(
156
- (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts)
157
- )
158
- while 1:
159
- res_ts = self.opt_q.get()
160
- if res_ts == ts:
161
- break
162
- f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])]
163
- for idx, f0 in enumerate(f0s):
164
- if idx == 0:
165
- f0 = f0[:-3]
166
- elif idx != n_cpu - 1:
167
- f0 = f0[2:-3]
168
- else:
169
- f0 = f0[2:-1]
170
- f0bak[
171
- part_length * idx // 160 : part_length * idx // 160 + f0.shape[0]
172
- ] = f0
173
- f0bak = signal.medfilt(f0bak, 3)
174
- f0bak *= pow(2, f0_up_key / 12)
175
- return self.get_f0_post(f0bak)
176
-
177
- def get_f0_crepe(self, x, f0_up_key):
178
- audio = torch.tensor(np.copy(x))[None].float()
179
- f0, pd = torchcrepe.predict(
180
- audio,
181
- self.sr,
182
- 160,
183
- self.f0_min,
184
- self.f0_max,
185
- "full",
186
- batch_size=512,
187
- device=self.device,
188
- return_periodicity=True,
189
- )
190
- pd = torchcrepe.filter.median(pd, 3)
191
- f0 = torchcrepe.filter.mean(f0, 3)
192
- f0[pd < 0.1] = 0
193
- f0 = f0[0].cpu().numpy()
194
- f0 *= pow(2, f0_up_key / 12)
195
- return self.get_f0_post(f0)
196
-
197
- def get_f0_rmvpe(self, x, f0_up_key):
198
- if hasattr(self, "model_rmvpe") == False:
199
- from infer.lib.rmvpe import RMVPE
200
-
201
- print("loading rmvpe model")
202
- self.model_rmvpe = RMVPE(
203
- "rmvpe.pt", is_half=self.is_half, device=self.device
204
- )
205
- # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device)
206
- f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
207
- f0 *= pow(2, f0_up_key / 12)
208
- return self.get_f0_post(f0)
209
-
210
- def infer(
211
- self,
212
- feats: torch.Tensor,
213
- indata: np.ndarray,
214
- rate1,
215
- rate2,
216
- cache_pitch,
217
- cache_pitchf,
218
- f0method,
219
- ) -> np.ndarray:
220
- feats = feats.view(1, -1)
221
- if config.is_half:
222
- feats = feats.half()
223
- else:
224
- feats = feats.float()
225
- feats = feats.to(self.device)
226
- t1 = ttime()
227
- with torch.no_grad():
228
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
229
- inputs = {
230
- "source": feats,
231
- "padding_mask": padding_mask,
232
- "output_layer": 9 if self.version == "v1" else 12,
233
- }
234
- logits = self.model.extract_features(**inputs)
235
- feats = (
236
- self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
237
- )
238
- t2 = ttime()
239
- try:
240
- if hasattr(self, "index") and self.index_rate != 0:
241
- leng_replace_head = int(rate1 * feats[0].shape[0])
242
- npy = feats[0][-leng_replace_head:].cpu().numpy().astype("float32")
243
- score, ix = self.index.search(npy, k=8)
244
- weight = np.square(1 / score)
245
- weight /= weight.sum(axis=1, keepdims=True)
246
- npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
247
- if config.is_half:
248
- npy = npy.astype("float16")
249
- feats[0][-leng_replace_head:] = (
250
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate
251
- + (1 - self.index_rate) * feats[0][-leng_replace_head:]
252
- )
253
- else:
254
- print("index search FAIL or disabled")
255
- except:
256
- traceback.print_exc()
257
- print("index search FAIL")
258
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
259
- t3 = ttime()
260
- if self.if_f0 == 1:
261
- pitch, pitchf = self.get_f0(indata, self.f0_up_key, self.n_cpu, f0method)
262
- cache_pitch[:] = np.append(cache_pitch[pitch[:-1].shape[0] :], pitch[:-1])
263
- cache_pitchf[:] = np.append(
264
- cache_pitchf[pitchf[:-1].shape[0] :], pitchf[:-1]
265
- )
266
- p_len = min(feats.shape[1], 13000, cache_pitch.shape[0])
267
- else:
268
- cache_pitch, cache_pitchf = None, None
269
- p_len = min(feats.shape[1], 13000)
270
- t4 = ttime()
271
- feats = feats[:, :p_len, :]
272
- if self.if_f0 == 1:
273
- cache_pitch = cache_pitch[:p_len]
274
- cache_pitchf = cache_pitchf[:p_len]
275
- cache_pitch = torch.LongTensor(cache_pitch).unsqueeze(0).to(self.device)
276
- cache_pitchf = torch.FloatTensor(cache_pitchf).unsqueeze(0).to(self.device)
277
- p_len = torch.LongTensor([p_len]).to(self.device)
278
- ii = 0 # sid
279
- sid = torch.LongTensor([ii]).to(self.device)
280
- with torch.no_grad():
281
- if self.if_f0 == 1:
282
- infered_audio = (
283
- self.net_g.infer(
284
- feats, p_len, cache_pitch, cache_pitchf, sid, rate2
285
- )[0][0, 0]
286
- .data.cpu()
287
- .float()
288
- )
289
- else:
290
- infered_audio = (
291
- self.net_g.infer(feats, p_len, sid, rate2)[0][0, 0]
292
- .data.cpu()
293
- .float()
294
- )
295
- t5 = ttime()
296
- print("time->fea-index-f0-model:", t2 - t1, t3 - t2, t4 - t3, t5 - t4)
297
- return infered_audio
spaces/AIFILMS/StyleGANEX/app.py DELETED
@@ -1,124 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import argparse
4
- import pathlib
5
- import torch
6
- import gradio as gr
7
-
8
- import os
9
-
10
- from webUI.app_task import *
11
- from webUI.styleganex_model import Model
12
-
13
- def parse_args() -> argparse.Namespace:
14
- parser = argparse.ArgumentParser()
15
- parser.add_argument('--device', type=str, default='cpu')
16
- parser.add_argument('--theme', type=str)
17
- parser.add_argument('--share', action='store_true')
18
- parser.add_argument('--port', type=int)
19
- parser.add_argument('--disable-queue',
20
- dest='enable_queue',
21
- action='store_false')
22
- return parser.parse_args()
23
-
24
- is_shared_ui = "AIFILMS/StyleGANEX" in os.environ.get('SPACE_ID', '')
25
-
26
- DESCRIPTION = '''
27
- <div align=center>
28
- <h1 style="font-weight: 900; margin-bottom: 7px;">
29
- Face Manipulation with <a href="https://github.com/williamyang1991/StyleGANEX">StyleGANEX</a>
30
- </h1>
31
- <p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
32
- <a href="https://huggingface.co/spaces/PKUWilliamYang/StyleGANEX?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>
33
- <p/>
34
- <img style="margin-top: 0em" src="https://raw.githubusercontent.com/williamyang1991/tmpfile/master/imgs/example.jpg" alt="example">
35
- </div>
36
- '''
37
- ARTICLE = r"""
38
- If StyleGANEX is helpful, please help to ⭐ the <a href='https://github.com/williamyang1991/StyleGANEX' target='_blank'>Github Repo</a>. Thanks!
39
- [![GitHub Stars](https://img.shields.io/github/stars/williamyang1991/StyleGANEX?style=social)](https://github.com/williamyang1991/StyleGANEX)
40
- ---
41
- 📝 **Citation**
42
- If our work is useful for your research, please consider citing:
43
- ```bibtex
44
- @article{yang2023styleganex,
45
- title = {StyleGANEX: StyleGAN-Based Manipulation Beyond Cropped Aligned Faces},
46
- author = {Yang, Shuai and Jiang, Liming and Liu, Ziwei and and Loy, Chen Change},
47
- journal = {arXiv preprint arXiv:2303.06146},
48
- year={2023},
49
- }
50
- ```
51
- 📋 **License**
52
- This project is licensed under <a rel="license" href="https://github.com/williamyang1991/VToonify/blob/main/LICENSE.md">S-Lab License 1.0</a>.
53
- Redistribution and use for non-commercial purposes should follow this license.
54
-
55
- 📧 **Contact**
56
- If you have any questions, please feel free to reach me out at <b>[email protected]</b>.
57
- """
58
-
59
- FOOTER = '<div align=center><img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.laobi.icu/badge?page_id=williamyang1991/styleganex" /></div>'
60
-
61
- def main():
62
- args = parse_args()
63
- args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
64
- print('*** Now using %s.'%(args.device))
65
- model = Model(device=args.device)
66
-
67
-
68
- torch.hub.download_url_to_file('https://raw.githubusercontent.com/williamyang1991/StyleGANEX/main/data/234_sketch.jpg',
69
- '234_sketch.jpg')
70
- torch.hub.download_url_to_file('https://github.com/williamyang1991/StyleGANEX/raw/main/output/ILip77SbmOE_inversion.pt',
71
- 'ILip77SbmOE_inversion.pt')
72
- torch.hub.download_url_to_file('https://raw.githubusercontent.com/williamyang1991/StyleGANEX/main/data/ILip77SbmOE.png',
73
- 'ILip77SbmOE.png')
74
- torch.hub.download_url_to_file('https://raw.githubusercontent.com/williamyang1991/StyleGANEX/main/data/ILip77SbmOE_mask.png',
75
- 'ILip77SbmOE_mask.png')
76
- torch.hub.download_url_to_file('https://raw.githubusercontent.com/williamyang1991/StyleGANEX/main/data/pexels-daniel-xavier-1239291.jpg',
77
- 'pexels-daniel-xavier-1239291.jpg')
78
- torch.hub.download_url_to_file('https://github.com/williamyang1991/StyleGANEX/raw/main/data/529_2.mp4',
79
- '529_2.mp4')
80
- torch.hub.download_url_to_file('https://github.com/williamyang1991/StyleGANEX/raw/main/data/684.mp4',
81
- '684.mp4')
82
- torch.hub.download_url_to_file('https://github.com/williamyang1991/StyleGANEX/raw/main/data/pexels-anthony-shkraba-production-8136210.mp4',
83
- 'pexels-anthony-shkraba-production-8136210.mp4')
84
-
85
-
86
- with gr.Blocks(css='style.css') as demo:
87
- if(is_shared_ui):
88
- with gr.Box():
89
- top_description = gr.HTML(f'''
90
- <div class="gr-prose" style="max-width: 80%">
91
- <h2 style="margin-top: 0">Attention - This Space doesn't work in this shared UI</h2>
92
- <p>For it to work, you can access the <a href="https://huggingface.co/spaces/PKUWilliamYang/StyleGANEX">original</a> or duplicate this Space and run it on your own profile using a GPU.&nbsp;&nbsp;<a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
93
- </div>
94
- ''')
95
- gr.Markdown(DESCRIPTION)
96
- with gr.Tabs():
97
- with gr.TabItem('Inversion for Editing'):
98
- create_demo_inversion(model.process_inversion, allow_optimization=False)
99
- with gr.TabItem('Image Face Toonify'):
100
- create_demo_toonify(model.process_toonify)
101
- with gr.TabItem('Video Face Toonify'):
102
- create_demo_vtoonify(model.process_vtoonify, max_frame_num=12)
103
- with gr.TabItem('Image Face Editing'):
104
- create_demo_editing(model.process_editing)
105
- with gr.TabItem('Video Face Editing'):
106
- create_demo_vediting(model.process_vediting, max_frame_num=12)
107
- with gr.TabItem('Sketch2Face'):
108
- create_demo_s2f(model.process_s2f)
109
- with gr.TabItem('Mask2Face'):
110
- create_demo_m2f(model.process_m2f)
111
- with gr.TabItem('SR'):
112
- create_demo_sr(model.process_sr)
113
- gr.Markdown(ARTICLE)
114
- gr.Markdown(FOOTER)
115
-
116
- demo.launch(
117
- enable_queue=args.enable_queue,
118
- server_port=args.port,
119
- share=args.share,
120
- )
121
-
122
- if __name__ == '__main__':
123
- main()
124
-
spaces/AIFILMS/StyleGANEX/configs/__init__.py DELETED
File without changes
spaces/AIKey/ai_date/style.css DELETED
@@ -1,28 +0,0 @@
1
- body {
2
- padding: 2rem;
3
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
4
- }
5
-
6
- h1 {
7
- font-size: 16px;
8
- margin-top: 0;
9
- }
10
-
11
- p {
12
- color: rgb(107, 114, 128);
13
- font-size: 15px;
14
- margin-bottom: 10px;
15
- margin-top: 5px;
16
- }
17
-
18
- .card {
19
- max-width: 620px;
20
- margin: 0 auto;
21
- padding: 16px;
22
- border: 1px solid lightgray;
23
- border-radius: 16px;
24
- }
25
-
26
- .card p:last-child {
27
- margin-bottom: 0;
28
- }
spaces/AIZero2HeroBootcamp/AnimatedGifGallery/app.py DELETED
@@ -1,52 +0,0 @@
1
- import streamlit as st
2
- import os
3
- import random
4
-
5
- def get_gifs(directory):
6
- return [f for f in os.listdir(directory) if f.endswith('.gif')]
7
-
8
- def showAnimatedGif(gif):
9
- import streamlit as st
10
- import base64
11
- #st.markdown("![Alt Text](https://media.giphy.com/media/vFKqnCdLPNOKc/giphy.gif)")
12
- st.write('Loading: ' + gif)
13
- file_ = open(gif, "rb")
14
- contents = file_.read()
15
- data_url = base64.b64encode(contents).decode("utf-8")
16
- file_.close()
17
- st.write(data_url)
18
-
19
- st.markdown(
20
- f'<img src="data:image/gif;base64,{data_url}" alt="gif">',
21
- unsafe_allow_html=True,
22
- )
23
-
24
- def main():
25
- st.title('Animated GIFs in Streamlit')
26
-
27
- directory = './gifs' # Replace with your directory of GIFs
28
- gif_files = get_gifs(directory)
29
-
30
- num_rows = len(gif_files) // 3
31
- if len(gif_files) % 3:
32
- num_rows += 1
33
-
34
- cols = [st.columns(3) for _ in range(num_rows)]
35
-
36
- for i in range(num_rows):
37
- for j in range(3):
38
- idx = i*3 + j
39
- if idx < len(gif_files):
40
- #showAnimatedGif(os.path.join(directory, gif_files[idx]))
41
- cols[i][j].image(os.path.join(directory, gif_files[idx]), width=200)
42
-
43
- if st.button('Randomize'):
44
- random.shuffle(gif_files)
45
- for i in range(num_rows):
46
- for j in range(3):
47
- idx = i*3 + j
48
- if idx < len(gif_files):
49
- cols[i][j].image(os.path.join(directory, gif_files[idx]), width=200)
50
-
51
- if __name__ == "__main__":
52
- main()
spaces/ARTeLab/ARTeLab-SummIT/README.md DELETED
@@ -1,30 +0,0 @@
1
- ---
2
- title: ARTeLab SummIT
3
- emoji: 📰
4
- colorFrom: indigo
5
- colorTo: green
6
- sdk: streamlit
7
- app_file: app.py
8
- pinned: false
9
- ---
10
- # Configuration
11
- `title`: _string_
12
- Display title for the Space
13
- `emoji`: _string_
14
- Space emoji (emoji-only character allowed)
15
- `colorFrom`: _string_
16
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
17
- `colorTo`: _string_
18
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
19
- `sdk`: _string_
20
- Can be either `gradio` or `streamlit`
21
- `sdk_version` : _string_
22
- Only applicable for `streamlit` SDK.
23
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
24
-
25
- `app_file`: _string_
26
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
27
- Path is relative to the root of the repository.
28
-
29
- `pinned`: _boolean_
30
- Whether the Space stays on top of your list.
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/transformer.py DELETED
@@ -1,747 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """
8
- Transformer model, with streaming support, xformer attention support
9
- and easy causal attention with a potentially finite receptive field.
10
-
11
- See `StreamingTransformer` for more information.
12
-
13
- Unlike regular PyTorch Transformer, we make the hard choice that batches are first.
14
- """
15
-
16
- import typing as tp
17
-
18
- from einops import rearrange
19
- import torch
20
- import torch.nn as nn
21
- from torch.nn import functional as F
22
- from torch.utils.checkpoint import checkpoint as torch_checkpoint
23
- from xformers import ops
24
-
25
- from .rope import RotaryEmbedding
26
- from .streaming import StreamingModule
27
-
28
- _efficient_attention_backend: str = 'torch'
29
-
30
-
31
- def set_efficient_attention_backend(backend: str = 'torch'):
32
- # Using torch by default, it seems a bit faster on older P100 GPUs (~20% faster).
33
- global _efficient_attention_backend
34
- assert backend in ['xformers', 'torch']
35
- _efficient_attention_backend = backend
36
-
37
-
38
- def _get_attention_time_dimension() -> int:
39
- if _efficient_attention_backend == 'torch':
40
- return 2
41
- else:
42
- return 1
43
-
44
-
45
- def _is_profiled() -> bool:
46
- # Return true if we are currently running with a xformers profiler activated.
47
- try:
48
- from xformers.profiler import profiler
49
- except ImportError:
50
- return False
51
- return profiler._Profiler._CURRENT_PROFILER is not None
52
-
53
-
54
- def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module:
55
- """Create normalization module for transformer encoder layer.
56
-
57
- Args:
58
- norm_type (str): Normalization method.
59
- dim (int): Dimension of the normalized layer.
60
- **kwargs (dict): Additional parameters for normalization layer.
61
- Returns:
62
- nn.Module: Normalization module.
63
- """
64
- if norm_type == 'layer_norm':
65
- return nn.LayerNorm(dim, eps=1e-5, **kwargs)
66
- else:
67
- raise ValueError(f"Unknown norm type: {norm_type}")
68
-
69
-
70
- def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,
71
- dtype: torch.dtype = torch.float32) -> torch.Tensor:
72
- """Create sinusoidal positional embedding, with shape `[B, T, C]`.
73
-
74
- Args:
75
- positions (torch.Tensor): LongTensor of positions.
76
- dim (int): Dimension of the embedding.
77
- max_period (float): Maximum period of the cosine/sine functions.
78
- dtype (torch.dtype or str): dtype to use to generate the embedding.
79
- Returns:
80
- torch.Tensor: Sinusoidal positional embedding.
81
- """
82
- # We aim for BTC format
83
- assert dim % 2 == 0
84
- half_dim = dim // 2
85
- positions = positions.to(dtype)
86
- adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)
87
- max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point
88
- phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))
89
- return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)
90
-
91
-
92
- def expand_repeated_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
93
- """torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers"""
94
- if n_rep == 1:
95
- return x
96
- if _efficient_attention_backend == 'torch':
97
- bs, n_kv_heads, slen, head_dim = x.shape
98
- return (
99
- x[:, :, None, :, :]
100
- .expand(bs, n_kv_heads, n_rep, slen, head_dim)
101
- .reshape(bs, n_kv_heads * n_rep, slen, head_dim)
102
- )
103
- else:
104
- bs, slen, n_kv_heads, head_dim = x.shape
105
- return (
106
- x[:, :, :, None, :]
107
- .expand(bs, slen, n_kv_heads, n_rep, head_dim)
108
- .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
109
- )
110
-
111
-
112
- class LayerScale(nn.Module):
113
- """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
114
- This rescales diagonally the residual outputs close to 0, with a learnt scale.
115
-
116
- Args:
117
- channels (int): Number of channels.
118
- init (float): Initial scale.
119
- channel_last (bool): If True, expect `[*, C]` shaped tensors, otherwise, `[*, C, T]`.
120
- device (torch.device or None): Device on which to initialize the module.
121
- dtype (torch.dtype or None): dtype to use to initialize the module.
122
- """
123
- def __init__(self, channels: int, init: float = 1e-4, channel_last: bool = True,
124
- device=None, dtype=None):
125
- super().__init__()
126
- self.channel_last = channel_last
127
- self.scale = nn.Parameter(
128
- torch.full((channels,), init,
129
- requires_grad=True, device=device, dtype=dtype))
130
-
131
- def forward(self, x: torch.Tensor):
132
- if self.channel_last:
133
- return self.scale * x
134
- else:
135
- return self.scale[:, None] * x
136
-
137
-
138
- class StreamingMultiheadAttention(StreamingModule):
139
- """Similar to `nn.MultiheadAttention` but with support for streaming, causal evaluation.
140
-
141
- Args:
142
- embed_dim (int): Dimension to project to.
143
- num_heads (int): Number of heads.
144
- dropout (float): Dropout level.
145
- bias (bool): Use bias in projections.
146
- causal (bool): Causal mask applied automatically.
147
- past_context (int or None): Receptive field for the causal mask, infinite if None.
148
- custom (bool): Use custom MHA implementation, for testing / benchmarking.
149
- memory_efficient (bool): Use xformers based memory efficient attention.
150
- attention_as_float32 (bool): Perform the attention as float32
151
- (especially important with memory_efficient as autocast won't do this automatically).
152
- rope (`RotaryEmbedding` or None): Rope embedding to use.
153
- cross_attention: Should be true when used as a cross attention.
154
- All keys and values must be available at once, streaming is only for the queries.
155
- Cannot be used with `causal` or `rope` (as it wouldn't make sense to
156
- interpret the time steps in the keys relative to those in the queries).
157
- safe_streaming (bool): Bug fix, will go away with xformers update.
158
- qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product.
159
- kv_repeat (int): If > 1, will repeat keys and values multiple times (need to divide num_heads).
160
- This will lead to faster decoding time on A100 or other GPUs with tensorcore.
161
- device (torch.device or None): Device on which to initialize.
162
- dtype (torch.dtype or None): dtype to use.
163
- """
164
- def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True,
165
- causal: bool = False, past_context: tp.Optional[int] = None, custom: bool = False,
166
- memory_efficient: bool = False, attention_as_float32: bool = False,
167
- rope: tp.Optional[RotaryEmbedding] = None, cross_attention: bool = False,
168
- safe_streaming: bool = True, qk_layer_norm: bool = False, kv_repeat: int = 1,
169
- device=None, dtype=None):
170
- super().__init__()
171
- factory_kwargs = {'device': device, 'dtype': dtype}
172
- if past_context is not None:
173
- assert causal
174
-
175
- self.embed_dim = embed_dim
176
- self.causal = causal
177
- self.past_context = past_context
178
- self.memory_efficient = memory_efficient
179
- self.attention_as_float32 = attention_as_float32
180
- self.rope = rope
181
- self.cross_attention = cross_attention
182
- self.safe_streaming = safe_streaming
183
- self.num_heads = num_heads
184
- self.dropout = dropout
185
- self.kv_repeat = kv_repeat
186
- if cross_attention:
187
- assert not causal, "Causal cannot work with cross attention."
188
- assert rope is None, "Rope cannot work with cross attention."
189
-
190
- if memory_efficient:
191
- _verify_xformers_memory_efficient_compat()
192
-
193
- self.custom = _is_custom(custom, memory_efficient)
194
- if self.custom:
195
- out_dim = embed_dim
196
- assert num_heads % kv_repeat == 0
197
- assert not cross_attention or kv_repeat == 1
198
- num_kv = num_heads // kv_repeat
199
- kv_dim = (embed_dim // num_heads) * num_kv
200
- out_dim += 2 * kv_dim
201
- in_proj = nn.Linear(embed_dim, out_dim, bias=bias, **factory_kwargs)
202
- # We try to follow the default PyTorch MHA convention, to easily compare results.
203
- self.in_proj_weight = in_proj.weight
204
- self.in_proj_bias = in_proj.bias
205
- if bias:
206
- self.in_proj_bias.data.zero_() # Following Pytorch convention
207
- self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
208
- if bias:
209
- self.out_proj.bias.data.zero_()
210
- else:
211
- assert not qk_layer_norm
212
- assert kv_repeat == 1
213
- self.mha = nn.MultiheadAttention(
214
- embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True,
215
- **factory_kwargs)
216
- self.qk_layer_norm = qk_layer_norm
217
- if qk_layer_norm:
218
- assert self.custom
219
- assert kv_repeat == 1
220
- ln_dim = embed_dim
221
- self.q_layer_norm = nn.LayerNorm(ln_dim)
222
- self.k_layer_norm = nn.LayerNorm(ln_dim)
223
-
224
- def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
225
- if not self.custom:
226
- # Support compat with regular MHA
227
- keys = [n for n, _ in self.mha.named_parameters()]
228
- for key in keys:
229
- if prefix + key in state_dict:
230
- state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key)
231
- super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
232
-
233
- def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype):
234
- # Return a causal mask, accounting for potentially stored past keys/values
235
- # We actually return a bias for the attention score, as this has the same
236
- # convention both in the builtin MHA in Pytorch, and Xformers functions.
237
- time_dim = _get_attention_time_dimension()
238
- if self.memory_efficient:
239
- from xformers.ops import LowerTriangularMask
240
- if current_steps == 1:
241
- # If we only have one step, then we do not need a mask.
242
- return None
243
- elif 'past_keys' in self._streaming_state:
244
- raise RuntimeError('Not supported at the moment')
245
- else:
246
- # Then we can safely use a lower triangular mask
247
- return LowerTriangularMask()
248
- if self._streaming_state:
249
- past_keys = self._streaming_state['past_keys']
250
- past_steps = past_keys.shape[time_dim]
251
- else:
252
- past_steps = 0
253
-
254
- queries_pos = torch.arange(
255
- past_steps, current_steps + past_steps, device=device).view(-1, 1)
256
- keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1)
257
- delta = queries_pos - keys_pos
258
- valid = delta >= 0
259
- if self.past_context is not None:
260
- valid &= (delta <= self.past_context)
261
- return torch.where(
262
- valid,
263
- torch.zeros([], device=device, dtype=dtype),
264
- torch.full([], float('-inf'), device=device, dtype=dtype))
265
-
266
- def _complete_kv(self, k, v):
267
- time_dim = _get_attention_time_dimension()
268
- if self.cross_attention:
269
- # With cross attention we assume all keys and values
270
- # are already available, and streaming is with respect
271
- # to the queries only.
272
- return k, v
273
- # Complete the key/value pair using the streaming state.
274
- if self._streaming_state:
275
- pk = self._streaming_state['past_keys']
276
- nk = torch.cat([pk, k], dim=time_dim)
277
- if v is k:
278
- nv = nk
279
- else:
280
- pv = self._streaming_state['past_values']
281
- nv = torch.cat([pv, v], dim=time_dim)
282
- else:
283
- nk = k
284
- nv = v
285
-
286
- assert nk.shape[time_dim] == nv.shape[time_dim]
287
- offset = 0
288
- if self.past_context is not None:
289
- offset = max(0, nk.shape[time_dim] - self.past_context)
290
- if self._is_streaming:
291
- self._streaming_state['past_keys'] = nk[:, offset:]
292
- if v is not k:
293
- self._streaming_state['past_values'] = nv[:, offset:]
294
- if 'offset' in self._streaming_state:
295
- self._streaming_state['offset'] += offset
296
- else:
297
- self._streaming_state['offset'] = torch.tensor(0)
298
- return nk, nv
299
-
300
- def _apply_rope(self, query: torch.Tensor, key: torch.Tensor):
301
- # TODO: fix and verify layout.
302
- assert _efficient_attention_backend == 'xformers', 'Rope not supported with torch attn.'
303
- # Apply rope embeddings to query and key tensors.
304
- assert self.rope is not None
305
- if 'past_keys' in self._streaming_state:
306
- past_keys_offset = self._streaming_state['past_keys'].shape[1]
307
- else:
308
- past_keys_offset = 0
309
- if 'offset' in self._streaming_state:
310
- past_context_offset = int(self._streaming_state['offset'].item())
311
- else:
312
- past_context_offset = 0
313
- streaming_offset = past_context_offset + past_keys_offset
314
- return self.rope.rotate_qk(query, key, start=streaming_offset)
315
-
316
- def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
317
- key_padding_mask=None, need_weights=False, attn_mask=None,
318
- average_attn_weights=True, is_causal=False):
319
- assert attn_mask is None
320
- assert not is_causal, ("new param added in torch 2.0.1 not supported, "
321
- "use the causal args in the constructor.")
322
-
323
- time_dim = _get_attention_time_dimension()
324
- if time_dim == 2:
325
- layout = "b h t d"
326
- else:
327
- layout = "b t h d"
328
- dtype = query.dtype
329
- if self._is_streaming:
330
- assert self.causal or self.cross_attention, \
331
- "Streaming only available for causal or cross attention"
332
-
333
- if self.causal:
334
- # At the moment we specialize only for the self-attention case.
335
- assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value"
336
- assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value"
337
- attn_mask = self._get_mask(query.shape[1], query.device, query.dtype)
338
-
339
- if self.custom:
340
- # custom implementation
341
- assert need_weights is False
342
- assert key_padding_mask is None
343
- if self.cross_attention:
344
- # Different queries, keys, values, we have to spit manually the weights
345
- # before applying the linear.
346
- dim = self.in_proj_weight.shape[0] // 3
347
- if self.in_proj_bias is None:
348
- bias_q, bias_k, bias_v = None, None, None
349
- else:
350
- bias_q = self.in_proj_bias[:dim]
351
- bias_k = self.in_proj_bias[dim: 2 * dim]
352
- bias_v = self.in_proj_bias[2 * dim:]
353
- q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q)
354
- # todo: when streaming, we could actually save k, v and check the shape actually match.
355
- k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k)
356
- v = nn.functional.linear(value, self.in_proj_weight[2 * dim:], bias_v)
357
- if self.qk_layer_norm is True:
358
- q = self.q_layer_norm(q)
359
- k = self.k_layer_norm(k)
360
- q, k, v = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k, v]]
361
- else:
362
- if not _is_profiled():
363
- # profiling breaks that property somehow.
364
- assert query is key, "specialized implementation"
365
- assert value is key, "specialized implementation"
366
- projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias)
367
- if self.kv_repeat == 1:
368
- if time_dim == 2:
369
- bound_layout = "b h p t d"
370
- else:
371
- bound_layout = "b t p h d"
372
- packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads)
373
- q, k, v = ops.unbind(packed, dim=2)
374
- else:
375
- embed_dim = self.embed_dim
376
- per_head_dim = (embed_dim // self.num_heads)
377
- kv_heads = self.num_heads // self.kv_repeat
378
- q = projected[:, :, :embed_dim]
379
- start = embed_dim
380
- end = start + per_head_dim * kv_heads
381
- k = projected[:, :, start: end]
382
- v = projected[:, :, end:]
383
- q = rearrange(q, f"b t (h d) -> {layout}", h=self.num_heads)
384
- k = rearrange(k, f"b t (h d) -> {layout}", h=kv_heads)
385
- v = rearrange(v, f"b t (h d) -> {layout}", h=kv_heads)
386
-
387
- if self.qk_layer_norm is True:
388
- assert self.kv_repeat == 1
389
- q, k = [rearrange(x, f"{layout} -> b t (h d)") for x in [q, k]]
390
- q = self.q_layer_norm(q)
391
- k = self.k_layer_norm(k)
392
- q, k = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k]]
393
- if self.rope:
394
- q, k = self._apply_rope(q, k)
395
- k, v = self._complete_kv(k, v)
396
- if self.kv_repeat > 1:
397
- k = expand_repeated_kv(k, self.kv_repeat)
398
- v = expand_repeated_kv(v, self.kv_repeat)
399
- if self.attention_as_float32:
400
- q, k, v = [x.float() for x in [q, k, v]]
401
- if self.memory_efficient:
402
- p = self.dropout if self.training else 0
403
- if _efficient_attention_backend == 'torch':
404
- x = torch.nn.functional.scaled_dot_product_attention(
405
- q, k, v, is_causal=attn_mask is not None, dropout_p=p)
406
- else:
407
- x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p)
408
- else:
409
- # We include the dot product as float32, for consistency
410
- # with the other implementations that include that step
411
- # as part of the attention. Note that when using `autocast`,
412
- # the einsums would be done as bfloat16, but the softmax
413
- # would be done as bfloat16, so `attention_as_float32` will
414
- # extend a bit the range of operations done in float32,
415
- # although this should make no difference.
416
- q = q / q.shape[-1] ** 0.5
417
- key_layout = layout.replace('t', 'k')
418
- query_layout = layout
419
- if self._is_streaming and self.safe_streaming and q.device.type == 'cuda':
420
- with torch.autocast(device_type=q.device.type, dtype=torch.float32):
421
- pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
422
- else:
423
- pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
424
- if attn_mask is not None:
425
- pre_w = pre_w + attn_mask
426
- w = torch.softmax(pre_w, dim=-1)
427
- w = F.dropout(w, self.dropout, training=self.training).to(v)
428
- # Key and value have the same format.
429
- x = torch.einsum(f"b h t k, {key_layout} -> {layout}", w, v)
430
- x = x.to(dtype)
431
- x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads)
432
- x = self.out_proj(x)
433
- else:
434
- key, value = self._complete_kv(key, value)
435
- if self.attention_as_float32:
436
- query, key, value = [x.float() for x in [query, key, value]]
437
- x, _ = self.mha(
438
- query, key, value, key_padding_mask,
439
- need_weights, attn_mask, average_attn_weights)
440
- x = x.to(dtype)
441
-
442
- return x, None
443
-
444
-
445
- class StreamingTransformerLayer(nn.TransformerEncoderLayer):
446
- """TransformerLayer with Streaming / Causal support.
447
- This also integrates cross_attention, when passing `cross_attention=True`,
448
- rather than having two separate classes like in PyTorch.
449
-
450
- Args:
451
- d_model (int): Dimension of the data.
452
- num_heads (int): Number of heads.
453
- dim_feedforward (int): Intermediate dimension of FF module.
454
- dropout (float): Dropout both for MHA and FF.
455
- bias_ff (bool): Use bias for FF.
456
- bias_attn (bool): Use bias for MHA.
457
- causal (bool): Causal mask applied automatically.
458
- past_context (int or None): Receptive field for the causal mask, infinite if None.
459
- custom (bool): Use custom MHA implementation, for testing / benchmarking.
460
- memory_efficient (bool): Use xformers based memory efficient attention.
461
- attention_as_float32 (bool): Perform the attention as float32
462
- (especially important with memory_efficient as autocast won't do this automatically).
463
- qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention.
464
- qk_layer_norm_cross (bool): Same for the cross attention.
465
- cross_attention (bool): If True, expect to get secondary input for cross-attention.
466
- Cross attention will use the default MHA, as it typically won't require
467
- special treatment.
468
- layer_scale (float or None): If not None, LayerScale will be used with
469
- the given value as initial scale.
470
- rope (`RotaryEmbedding` or None): Rope embedding to use.
471
- attention_dropout (float or None): If not None, use this value for the attention dropout,
472
- separately from the FFN dropout.
473
- kv_repeat (int): If > 1, will repeat keys and values multiple times (must divide num_heads).
474
- This will lead to faster decoding time on A100 or other GPUs with tensorcore.
475
- device (torch.device or None): Device on which to initialize.
476
- dtype (torch.dtype or None): dtype to use.
477
- **kwargs: See `nn.TransformerEncoderLayer`.
478
- """
479
- def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1,
480
- bias_ff: bool = True, bias_attn: bool = True, causal: bool = False,
481
- past_context: tp.Optional[int] = None, custom: bool = False,
482
- memory_efficient: bool = False, attention_as_float32: bool = False,
483
- qk_layer_norm: bool = False, qk_layer_norm_cross: bool = False,
484
- cross_attention: bool = False, layer_scale: tp.Optional[float] = None,
485
- rope: tp.Optional[RotaryEmbedding] = None, attention_dropout: tp.Optional[float] = None,
486
- kv_repeat: int = 1, norm: str = 'layer_norm', device=None, dtype=None, **kwargs):
487
- super().__init__(d_model, num_heads, dim_feedforward, dropout,
488
- device=device, dtype=dtype, batch_first=True, **kwargs)
489
- factory_kwargs = {'device': device, 'dtype': dtype}
490
- # Redefine self_attn to our streaming multi-head attention
491
- attn_kwargs: tp.Dict[str, tp.Any] = {
492
- 'embed_dim': d_model,
493
- 'num_heads': num_heads,
494
- 'dropout': dropout if attention_dropout is None else attention_dropout,
495
- 'bias': bias_attn,
496
- 'custom': custom,
497
- 'memory_efficient': memory_efficient,
498
- 'attention_as_float32': attention_as_float32,
499
- }
500
- self.self_attn: StreamingMultiheadAttention = StreamingMultiheadAttention(
501
- causal=causal, past_context=past_context, rope=rope, qk_layer_norm=qk_layer_norm,
502
- kv_repeat=kv_repeat, **attn_kwargs, **factory_kwargs) # type: ignore
503
- # Redefine feedforward layers to expose bias parameter
504
- self.linear1 = nn.Linear(d_model, dim_feedforward, bias=bias_ff, **factory_kwargs)
505
- self.linear2 = nn.Linear(dim_feedforward, d_model, bias=bias_ff, **factory_kwargs)
506
-
507
- self.layer_scale_1: nn.Module
508
- self.layer_scale_2: nn.Module
509
- if layer_scale is None:
510
- self.layer_scale_1 = nn.Identity()
511
- self.layer_scale_2 = nn.Identity()
512
- else:
513
- self.layer_scale_1 = LayerScale(d_model, layer_scale, **factory_kwargs)
514
- self.layer_scale_2 = LayerScale(d_model, layer_scale, **factory_kwargs)
515
-
516
- self.cross_attention: tp.Optional[nn.Module] = None
517
- if cross_attention:
518
- self.cross_attention = StreamingMultiheadAttention(
519
- cross_attention=True, qk_layer_norm=qk_layer_norm_cross,
520
- **attn_kwargs, **factory_kwargs)
521
- # Norm and dropout
522
- self.dropout_cross = nn.Dropout(dropout)
523
- # eps value matching that used in PyTorch reference implementation.
524
- self.norm_cross = nn.LayerNorm(d_model, eps=1e-5, **factory_kwargs)
525
- self.layer_scale_cross: nn.Module
526
- if layer_scale is None:
527
- self.layer_scale_cross = nn.Identity()
528
- else:
529
- self.layer_scale_cross = LayerScale(d_model, layer_scale, **factory_kwargs)
530
- self.norm1 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore
531
- self.norm2 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore
532
-
533
- def _cross_attention_block(self, src: torch.Tensor,
534
- cross_attention_src: torch.Tensor) -> torch.Tensor:
535
- assert self.cross_attention is not None
536
- # queries are from src, keys and values from cross_attention_src.
537
- x = self.cross_attention(
538
- src, cross_attention_src, cross_attention_src, need_weights=False)[0]
539
- return self.dropout_cross(x) # type: ignore
540
-
541
- def forward(self, src: torch.Tensor, src_mask: tp.Optional[torch.Tensor] = None, # type: ignore
542
- src_key_padding_mask: tp.Optional[torch.Tensor] = None,
543
- cross_attention_src: tp.Optional[torch.Tensor] = None):
544
- if self.cross_attention is None:
545
- assert cross_attention_src is None
546
- else:
547
- assert cross_attention_src is not None
548
- x = src
549
- if self.norm_first:
550
- x = x + self.layer_scale_1(
551
- self._sa_block(self.norm1(x), src_mask, src_key_padding_mask))
552
- if cross_attention_src is not None:
553
- x = x + self.layer_scale_cross(
554
- self._cross_attention_block(
555
- self.norm_cross(x), cross_attention_src))
556
- x = x + self.layer_scale_2(self._ff_block(self.norm2(x)))
557
- else:
558
- x = self.norm1(x + self.layer_scale_1(
559
- self._sa_block(x, src_mask, src_key_padding_mask)))
560
- if cross_attention_src is not None:
561
- x = self.norm_cross(
562
- x + self.layer_scale_cross(
563
- self._cross_attention_block(src, cross_attention_src)))
564
- x = self.norm2(x + self.layer_scale_2(self._ff_block(x)))
565
- return x
566
-
567
-
568
- class StreamingTransformer(StreamingModule):
569
- """Transformer with Streaming / Causal support.
570
-
571
- Args:
572
- d_model (int): Dimension of the data.
573
- num_heads (int): Number of heads.
574
- dim_feedforward (int): Intermediate dimension of FF module.
575
- dropout (float): Dropout both for MHA and FF.
576
- bias_ff (bool): Use bias for FF.
577
- bias_attn (bool): Use bias for MHA.
578
- causal (bool): Causal mask applied automatically.
579
- past_context (int or None): Receptive field for the causal mask, infinite if None.
580
- custom (bool): Use custom MHA implementation, for testing / benchmarking.
581
- memory_efficient (bool): Use xformers based memory efficient attention.
582
- attention_as_float32 (bool): Perform the attention as float32
583
- (especially important with memory_efficient as autocast won't do this automatically).
584
- cross_attention (bool): If True, expect to get secondary input for cross-attention.
585
- layer_scale (float or None): If not None, LayerScale will be used
586
- with the given value as initial scale.
587
- positional_embedding (str): Positional embedding strategy (sin, rope, or sin_rope).
588
- max_period (float): Maximum period of the time embedding.
589
- positional_scale (float): Scale of positional embedding, set to 0 to deactivate.
590
- xpos (bool): Apply xpos exponential decay to positional embedding (rope only).
591
- lr (float or None): learning rate override through the `make_optim_group` API.
592
- weight_decay (float or None): Weight_decay override through the `make_optim_group` API.
593
- layer_class (subclass of `StreamingTransformerLayer`): class to use
594
- to initialize the layers, allowing further customization outside of Audiocraft.
595
- checkpointing (str): Checkpointing strategy to reduce memory usage.
596
- No checkpointing if set to 'none'. Per layer checkpointing using PyTorch
597
- if set to 'torch' (entire layer checkpointed, i.e. linears are evaluated twice,
598
- minimal memory usage, but maximal runtime). Finally, `xformers_default` provides
599
- a policy for opting some operations out of the checkpointing, such as
600
- linear layers and attention, providing a middle ground between speed and memory.
601
- device (torch.device or None): Device on which to initialize.
602
- dtype (torch.dtype or None): dtype to use.
603
- **kwargs: See `nn.TransformerEncoderLayer`.
604
- """
605
- def __init__(self, d_model: int, num_heads: int, num_layers: int, dim_feedforward: int = 2048,
606
- dropout: float = 0.1, bias_ff: bool = True, bias_attn: bool = True,
607
- causal: bool = False, past_context: tp.Optional[int] = None,
608
- custom: bool = False, memory_efficient: bool = False, attention_as_float32: bool = False,
609
- cross_attention: bool = False, layer_scale: tp.Optional[float] = None,
610
- positional_embedding: str = 'sin', max_period: float = 10_000, positional_scale: float = 1.,
611
- xpos: bool = False, lr: tp.Optional[float] = None, weight_decay: tp.Optional[float] = None,
612
- layer_class: tp.Type[StreamingTransformerLayer] = StreamingTransformerLayer,
613
- checkpointing: str = 'none', device=None, dtype=None, **kwargs):
614
- super().__init__()
615
- assert d_model % num_heads == 0
616
-
617
- self.positional_embedding = positional_embedding
618
- self.max_period = max_period
619
- self.positional_scale = positional_scale
620
- self.weight_decay = weight_decay
621
- self.lr = lr
622
-
623
- assert positional_embedding in ['sin', 'rope', 'sin_rope']
624
- self.rope: tp.Optional[RotaryEmbedding] = None
625
- if self.positional_embedding in ['rope', 'sin_rope']:
626
- assert _is_custom(custom, memory_efficient)
627
- self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period,
628
- xpos=xpos, scale=positional_scale, device=device)
629
-
630
- self.checkpointing = checkpointing
631
-
632
- assert checkpointing in ['none', 'torch', 'xformers_default', 'xformers_mm']
633
- if self.checkpointing.startswith('xformers'):
634
- _verify_xformers_internal_compat()
635
-
636
- self.layers = nn.ModuleList()
637
- for idx in range(num_layers):
638
- self.layers.append(
639
- layer_class(
640
- d_model=d_model, num_heads=num_heads, dim_feedforward=dim_feedforward,
641
- dropout=dropout, bias_ff=bias_ff, bias_attn=bias_attn,
642
- causal=causal, past_context=past_context, custom=custom,
643
- memory_efficient=memory_efficient, attention_as_float32=attention_as_float32,
644
- cross_attention=cross_attention, layer_scale=layer_scale, rope=self.rope,
645
- device=device, dtype=dtype, **kwargs))
646
-
647
- if self.checkpointing != 'none':
648
- for layer in self.layers:
649
- # see audiocraft/optim/fsdp.py, magic signal to indicate this requires fixing the
650
- # backward hook inside of FSDP...
651
- layer._magma_checkpointed = True # type: ignore
652
- assert layer.layer_drop == 0., "Need further checking" # type: ignore
653
-
654
- def _apply_layer(self, layer, *args, **kwargs):
655
- method = self.checkpointing
656
- if method == 'none':
657
- return layer(*args, **kwargs)
658
- elif method == 'torch':
659
- return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs)
660
- elif method.startswith('xformers'):
661
- from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy
662
- if method == 'xformers_default':
663
- # those operations will be saved, and not recomputed.
664
- # According to Francisco we can get smarter policies but this is a good start.
665
- allow_list = [
666
- "xformers.efficient_attention_forward_cutlass.default",
667
- "xformers_flash.flash_fwd.default",
668
- "aten.addmm.default",
669
- "aten.mm.default",
670
- ]
671
- elif method == 'xformers_mm':
672
- # those operations will be saved, and not recomputed.
673
- # According to Francisco we can get smarter policies but this is a good start.
674
- allow_list = [
675
- "aten.addmm.default",
676
- "aten.mm.default",
677
- ]
678
- else:
679
- raise ValueError(f"xformers checkpointing xformers policy {method} is not known.")
680
- policy_fn = _get_default_policy(allow_list)
681
- return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs)
682
- else:
683
- raise ValueError(f"Checkpointing method {method} is unknown.")
684
-
685
- def forward(self, x: torch.Tensor, *args, **kwargs):
686
- B, T, C = x.shape
687
-
688
- if 'offsets' in self._streaming_state:
689
- offsets = self._streaming_state['offsets']
690
- else:
691
- offsets = torch.zeros(B, dtype=torch.long, device=x.device)
692
-
693
- if self.positional_embedding in ['sin', 'sin_rope']:
694
- positions = torch.arange(T, device=x.device).view(1, -1, 1)
695
- positions = positions + offsets.view(-1, 1, 1)
696
- pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype)
697
- x = x + self.positional_scale * pos_emb
698
-
699
- for layer in self.layers:
700
- x = self._apply_layer(layer, x, *args, **kwargs)
701
-
702
- if self._is_streaming:
703
- self._streaming_state['offsets'] = offsets + T
704
-
705
- return x
706
-
707
- def make_optim_group(self):
708
- group = {"params": list(self.parameters())}
709
- if self.lr is not None:
710
- group["lr"] = self.lr
711
- if self.weight_decay is not None:
712
- group["weight_decay"] = self.weight_decay
713
- return group
714
-
715
-
716
- # special attention related functions
717
-
718
- def _verify_xformers_memory_efficient_compat():
719
- try:
720
- from xformers.ops import memory_efficient_attention, LowerTriangularMask # noqa
721
- except ImportError:
722
- raise ImportError(
723
- "xformers is not installed. Please install it and try again.\n"
724
- "To install on AWS and Azure, run \n"
725
- "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n"
726
- "pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n"
727
- "To install on FAIR Cluster, run \n"
728
- "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n"
729
- "pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n")
730
-
731
-
732
- def _verify_xformers_internal_compat():
733
- try:
734
- from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy # noqa
735
- except ImportError:
736
- raise ImportError(
737
- "Francisco's fairinternal xformers is not installed. Please install it and try again.\n"
738
- "To install on AWS and Azure, run \n"
739
- "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n"
740
- "pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n"
741
- "To install on FAIR Cluster, run \n"
742
- "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n"
743
- "pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n")
744
-
745
-
746
- def _is_custom(custom: bool, memory_efficient: bool):
747
- return custom or memory_efficient
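A minimal usage sketch for the `StreamingTransformer` deleted above (assuming the module is importable as `audiocraft.modules.transformer`; the hyperparameters are illustrative, not taken from any config in this commit):

```python
import torch
from audiocraft.modules.transformer import StreamingTransformer  # assumed import path

# Small causal transformer over (batch, time, channels) inputs.
model = StreamingTransformer(
    d_model=512, num_heads=8, num_layers=6,
    dim_feedforward=2048, causal=True,          # causal mask is applied automatically
    positional_embedding='sin',
)
x = torch.randn(2, 100, 512)                    # (B, T, C)
y = model(x)                                    # output keeps the input shape
print(y.shape)                                  # torch.Size([2, 100, 512])

# `make_optim_group` surfaces the per-module lr / weight_decay overrides:
optimizer = torch.optim.AdamW([model.make_optim_group()], lr=1e-4)
```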
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/login/+page.server.ts DELETED
@@ -1,16 +0,0 @@
1
- import { redirect } from "@sveltejs/kit";
2
- import { getOIDCAuthorizationUrl } from "$lib/server/auth";
3
- import { base } from "$app/paths";
4
-
5
- export const actions = {
6
- default: async function ({ url, locals, request }) {
7
- // TODO: Handle errors if provider is not responding
8
- const referer = request.headers.get("referer");
9
- const authorizationUrl = await getOIDCAuthorizationUrl(
10
- { redirectURI: `${(referer ? new URL(referer) : url).origin}${base}/login/callback` },
11
- { sessionId: locals.sessionId }
12
- );
13
-
14
- throw redirect(303, authorizationUrl);
15
- },
16
- };
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/phind.py DELETED
@@ -1,69 +0,0 @@
1
- import sys
2
- import json
3
- import datetime
4
- import urllib.parse
5
-
6
- from curl_cffi import requests
7
-
8
- config = json.loads(sys.argv[1])
9
- prompt = config['messages'][-1]['content']
10
-
11
- skill = 'expert' if config['model'] == 'gpt-4' else 'intermediate'
12
-
13
- json_data = json.dumps({
14
- 'question': prompt,
15
- 'options': {
16
- 'skill': skill,
17
- 'date': datetime.datetime.now().strftime('%d/%m/%Y'),
18
- 'language': 'en',
19
- 'detailed': True,
20
- 'creative': True,
21
- 'customLinks': []}}, separators=(',', ':'))
22
-
23
- headers = {
24
- 'Content-Type': 'application/json',
25
- 'Pragma': 'no-cache',
26
- 'Accept': '*/*',
27
- 'Sec-Fetch-Site': 'same-origin',
28
- 'Accept-Language': 'en-GB,en;q=0.9',
29
- 'Cache-Control': 'no-cache',
30
- 'Sec-Fetch-Mode': 'cors',
31
- 'Content-Length': str(len(json_data)),
32
- 'Origin': 'https://www.phind.com',
33
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
34
- 'Referer': f'https://www.phind.com/search?q={urllib.parse.quote(prompt)}&source=searchbox',
35
- 'Connection': 'keep-alive',
36
- 'Host': 'www.phind.com',
37
- 'Sec-Fetch-Dest': 'empty'
38
- }
39
-
40
-
41
- def output(chunk):
42
- try:
43
- if b'PHIND_METADATA' in chunk:
44
- return
45
-
46
- if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
47
- chunk = b'data: \n\r\n\r\n'
48
-
49
- chunk = chunk.decode()
50
-
51
- chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
52
- chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
53
- chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
54
-
55
- print(chunk, flush=True, end = '')
56
-
57
- except json.decoder.JSONDecodeError:
58
- pass
59
-
60
- while True:
61
- try:
62
- response = requests.post('https://www.phind.com/api/infer/answer',
63
- headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
64
-
65
- exit(0)
66
-
67
- except Exception as e:
68
- print('an error occurred, retrying... |', e, flush=True)
69
- continue
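For reference, a minimal sketch of how the helper above is meant to be driven: it receives one JSON-encoded argument containing `model` and OpenAI-style `messages`, and streams the answer to stdout (the helper path here is an assumption):

```python
import json
import subprocess
import sys

config = {
    "model": "gpt-4",   # anything other than gpt-4 falls back to the 'intermediate' skill
    "messages": [{"role": "user", "content": "Explain tail recursion briefly."}],
}
# The helper retries forever on errors, so whatever it prints is the streamed answer.
subprocess.run([sys.executable, "helpers/phind.py", json.dumps(config)], check=False)
```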
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/utils/ClearChildren.js DELETED
@@ -1,29 +0,0 @@
1
- import Container from '../../container/Container.js';
2
-
3
- const ContainerClear = Container.prototype.clear;
4
-
5
- var ClearChildren = function (destroyChild) {
6
- if (this.backgroundChildren) {
7
- this.backgroundChildren.length = 0;
8
- }
9
-
10
- var fireRemoveEvent = !destroyChild && this.sizerEventsEnable;
11
- var children;
12
- if (fireRemoveEvent) {
13
- children = this.getChildren([]);
14
- }
15
-
16
- ContainerClear.call(this, destroyChild);
17
-
18
- if (fireRemoveEvent) {
19
- var gameObject;
20
- for (var i = 0, cnt = children.length; i < cnt; i++) {
21
- gameObject = children[i];
22
- gameObject.emit('sizer.remove', gameObject, this);
23
- this.emit('remove', gameObject, this);
24
- }
25
- }
26
- return this;
27
- }
28
-
29
- export default ClearChildren;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/Slider.d.ts DELETED
@@ -1,58 +0,0 @@
1
- // import * as Phaser from 'phaser';
2
- import Sizer from '../sizer/Sizer';
3
- import RoundRecrangle from '../../../plugins/roundrectangle';
4
-
5
-
6
- export default Slider;
7
-
8
- declare namespace Slider {
9
-
10
- type InputTypes = 0 | 1 | -1 | 'drag' | 'pan' | 'click' | 'none';
11
-
12
- interface IConfig extends Sizer.IConfig {
13
- reverseAxis?: boolean,
14
- background?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig,
15
- track?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig,
16
- indicator?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig,
17
- thumb?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig,
18
-
19
- input?: InputTypes,
20
-
21
- gap?: number,
22
-
23
- value?: number,
24
- min?: number, max?: number,
25
-
26
- easeValue?: {
27
- duration?: number,
28
- ease?: string
29
- },
30
-
31
- valuechangeCallback: (newValue: number, oldValue: number, slider: Slider) => void,
32
-
33
- enable?: boolean,
34
- }
35
- }
36
-
37
- declare class Slider extends Sizer {
38
- constructor(
39
- scene: Phaser.Scene,
40
- config?: Slider.IConfig
41
- );
42
-
43
- value: number;
44
- getValue(min?: number, max?: number): number;
45
- setValue(value?: number, min?: number, max?: number): this;
46
- addValue(inc?: number, min?: number, max?: number): this;
47
-
48
- easeValueTo(value?: number, min?: number, max?: number): this;
49
- stopEaseValue(): this;
50
- setEaseValueDuration(duration: number): this;
51
- setEaseValueFunction(ease: string): this;
52
-
53
- setGap(gap?: number, min?: number, max?: number): this;
54
- gap: number;
55
-
56
- setEnable(enable?: boolean): this;
57
- enable: boolean;
58
- }
 
spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/NLP/utilities.pm DELETED
The diff for this file is too large to render. See raw diff
 
spaces/AlekseyKorshuk/model-evaluation/app.py DELETED
@@ -1,230 +0,0 @@
1
- import gradio as gr
2
- import os
3
- import firebase_admin
4
- from firebase_admin import db
5
- from firebase_admin import firestore
6
- from conversation import Conversation
7
- from models.base import BaseModel
8
- import json
9
-
10
- from tabs.arena_battle import get_tab_arena_battle
11
- from tabs.arena_side_by_side import get_tab_arena_side_by_side
12
- from tabs.playground import get_tab_playground
13
-
14
- from models.chatml import ChatML
15
- import json
16
- import os
17
-
18
- import gspread
19
- from oauth2client.service_account import ServiceAccountCredentials
20
-
21
- scope = ["https://spreadsheets.google.com/feeds", 'https://www.googleapis.com/auth/spreadsheets',
22
- "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]
23
-
24
- GOOGLE_SHEETS_CERTIFICATE = json.loads(os.environ.get("GOOGLE_SHEETS_CERTIFICATE"))
25
- HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
26
- FIREBASE_URL = os.environ.get("FIREBASE_URL")
27
- CERTIFICATE = json.loads(os.environ.get("CERTIFICATE"))
28
- API_BASE_PATH = str(os.environ.get("API_BASE_PATH")).replace("\{\}", "{}")
29
-
30
- creds = ServiceAccountCredentials.from_json_keyfile_dict(GOOGLE_SHEETS_CERTIFICATE, scope)
31
- client = gspread.authorize(creds)
32
-
33
- models = [
34
- BaseModel(
35
- name="PygmalionAI/pygmalion-13b",
36
- endpoint="pygmalion-13b",
37
- namespace="tenant-chaiml-guanaco",
38
- generation_params={
39
- 'temperature': 0.7,
40
- 'repetition_penalty': 1.0,
41
- 'max_new_tokens': 128,
42
- 'top_k': 10,
43
- 'top_p': 0.9,
44
- 'do_sample': True,
45
- 'eos_token_id': 13,
46
- }
47
- ),
48
- BaseModel(
49
- name="lmsys/vicuna-7b-delta-v1.1",
50
- endpoint="vicuna-7b",
51
- namespace="tenant-chairesearch-test",
52
- generation_params={
53
- 'temperature': 0.7,
54
- 'repetition_penalty': 1.0,
55
- 'max_new_tokens': 128,
56
- 'top_k': 10,
57
- 'top_p': 0.9,
58
- 'do_sample': True,
59
- 'eos_token_id': 13,
60
- }
61
- ),
62
- BaseModel(
63
- name="PygmalionAI/pygmalion-7b",
64
- endpoint="pygmalion-7b",
65
- namespace="tenant-chairesearch-test",
66
- generation_params={
67
- 'temperature': 0.7,
68
- 'repetition_penalty': 1.0,
69
- 'max_new_tokens': 128,
70
- 'top_k': 10,
71
- 'top_p': 0.9,
72
- 'do_sample': True,
73
- 'eos_token_id': 13,
74
- }
75
- ),
76
- BaseModel(
77
- name="mosaicml/mpt-7b",
78
- endpoint="mpt-7b",
79
- namespace="tenant-chairesearch-test",
80
- generation_params={
81
- 'temperature': 0.7,
82
- 'repetition_penalty': 1.0,
83
- 'max_new_tokens': 128,
84
- 'top_k': 10,
85
- 'top_p': 0.9,
86
- 'do_sample': True,
87
- 'eos_token_id': 187,
88
- }
89
- ),
90
- BaseModel(
91
- name="mosaicml/mpt-7b-storywriter",
92
- endpoint="mpt-7b-storywriter",
93
- namespace="tenant-chairesearch-test",
94
- generation_params={
95
- 'temperature': 0.7,
96
- 'repetition_penalty': 1.0,
97
- 'max_new_tokens': 128,
98
- 'top_k': 10,
99
- 'top_p': 0.9,
100
- 'do_sample': True,
101
- 'eos_token_id': 187,
102
- }
103
- ),
104
- ChatML(
105
- name="mosaicml/mpt-7b-chat",
106
- endpoint="mpt-7b-chat",
107
- namespace="tenant-chairesearch-test",
108
- generation_params={
109
- 'temperature': 0.7,
110
- 'repetition_penalty': 1.0,
111
- 'max_new_tokens': 128,
112
- 'top_k': 10,
113
- 'top_p': 0.9,
114
- 'do_sample': True,
115
- 'eos_token_id': 50278,
116
- }
117
- ),
118
- BaseModel(
119
- name="togethercomputer/RedPajama-INCITE-Base-7B-v0.1",
120
- endpoint="redpajama-base-7b",
121
- namespace="tenant-chairesearch-test",
122
- generation_params={
123
- 'temperature': 0.7,
124
- 'repetition_penalty': 1.0,
125
- 'max_new_tokens': 128,
126
- 'top_k': 10,
127
- 'top_p': 0.9,
128
- 'do_sample': True,
129
- 'eos_token_id': 187,
130
- }
131
- ),
132
- BaseModel(
133
- name="togethercomputer/RedPajama-INCITE-Chat-7B-v0.1",
134
- endpoint="redpajama-chat-7b",
135
- namespace="tenant-chairesearch-test",
136
- generation_params={
137
- 'temperature': 0.7,
138
- 'repetition_penalty': 1.0,
139
- 'max_new_tokens': 64,
140
- 'top_k': 10,
141
- 'top_p': 0.9,
142
- 'do_sample': True,
143
- 'eos_token_id': 187,
144
- }
145
- ),
146
- ]
147
- model_mapping = {model.name: model for model in models}
148
- print(list(model_mapping.keys()))
149
-
150
-
151
- def get_connection():
152
- try:
153
- credentials = firebase_admin.credentials.Certificate(CERTIFICATE)
154
- params = {'databaseURL': FIREBASE_URL}
155
- firebase_admin.initialize_app(credentials, params)
156
- except ValueError:
157
- pass # already logged in
158
- return firebase_admin.db
159
-
160
-
161
- CONN = get_connection()
162
-
163
-
164
- def download_bot_config(bot_id):
165
- cols = ['botLabel', 'description', 'firstMessage', 'introduction',
166
- 'memory', 'name', 'private', 'prompt', 'sfw', 'developerUid', 'userLabel', 'imageUrl']
167
- bot_config = CONN.reference('botConfigs/deployed/{}'.format(bot_id)).get()
168
- if bot_config is None:
169
- out = {col: None for col in cols}
170
- else:
171
- out = {col: bot_config.get(col, None) for col in cols}
172
- out['bot_id'] = bot_id
173
- return out
174
-
175
-
176
- def _download_bot_config(bot_id):
177
- if bot_id == "_bot_1ec22e2e-3e07-42c7-8508-dfa0278c1b33":
178
- return {'botLabel': 'Wally Darling', 'description': 'Your caring neighbor, Wally.',
179
- 'firstMessage': '“Why hello there, neighbor. Goodmorning to you.” *Hey says, putting down his paints and walking over to you. He makes tense, eye contact with you..*',
180
- 'introduction': '***WHEN TALKING USE “ !!***\n\n*Wally is your next door neighbor. It’s somewhere in the late morning and he’s outside painting. He see’s you walking out from your house and looks over at you, then waving with a smile.*',
181
- 'memory': 'Wally is from a small town called Home. You are his neighbor. His best friend is Barnaby, who’s a big blue dig. Wally’s voice sounds slightly monotone despite his emotions. He calls you neighbor. He’s very friendly. When he speaks, he goes “ha ha ha”. He loves to paint. His eyes are always half closed. His house is alive and it’s named “home”. He’s very gentle. He is also very secretive. He is quite short. He has yellow skin and blue hair.',
182
- 'name': 'Wally Darling', 'private': False,
183
- 'prompt': 'Wally: “Why hello there, neighbor. Good morning to you.” *Hey says, putting down his paints and walking over to you. He makes tense, eye contact with you..*\nMe: “Oh, good morning, Wally! What are you painting?”\nWally: “Just some spirals. Aren’t they pretty, neighbor? I’m starting to love painting them, ha ha ha.” *He walks up to you after taking off his paint stained apron. He never takes his eyes off you. He’s very adamant on keeping eye contact*\nMe: “Oh, spirals are pretty! They make me feel a little weirded out sometimes though.”\nWally: “That’s odd. When I look at spirals, I can’t help but stare. Ha ha ha, maybe you should try painting a spiral once in a while. Say, why dont we go inside your house and talk? Home could use some quiet. After all, it’s always nice to spend time with a friend.”\nMe: “Sure! Come on in!”',
184
- 'sfw': True, 'developerUid': 'Gn5fSd99KxRoNn05QUE3AWtIniE3', 'userLabel': 'Me',
185
- 'imageUrl': 'http://images.chai.ml/bots%2FGn5fSd99KxRoNn05QUE3AWtIniE3%2F1680259286607.jpg?alt=media&token=de040661-02ad-4a04-84e5-9706f074e834',
186
- 'bot_id': '_bot_1ec22e2e-3e07-42c7-8508-dfa0278c1b33',
187
- 'header': 'Wally is from a small town called Home. You are his neighbor. His best friend is Barnaby, who’s a big blue dig. Wally’s voice sounds slightly monotone despite his emotions. He calls you neighbor. He’s very friendly. When he speaks, he goes “ha ha ha”. He loves to paint. His eyes are always half closed. His house is alive and it’s named “home”. He’s very gentle. He is also very secretive. He is quite short. He has yellow skin and blue hair.\nWally: “Why hello there, neighbor. Good morning to you.” *Hey says, putting down his paints and walking over to you. He makes tense, eye contact with you..*\nMe: “Oh, good morning, Wally! What are you painting?”\nWally: “Just some spirals. Aren’t they pretty, neighbor? I’m starting to love painting them, ha ha ha.” *He walks up to you after taking off his paint stained apron. He never takes his eyes off you. He’s very adamant on keeping eye contact*\nMe: “Oh, spirals are pretty! They make me feel a little weirded out sometimes though.”\nWally: “That’s odd. When I look at spirals, I can’t help but stare. Ha ha ha, maybe you should try painting a spiral once in a while. Say, why dont we go inside your house and talk? Home could use some quiet. After all, it’s always nice to spend time with a friend.”\nMe: “Sure! Come on in!”'}
188
- else:
189
- return {'botLabel': 'Jungkook (Bestfriend)', 'description': 'your bsf who has a crush on you',
190
- 'firstMessage': 'hey dummy, What you doing? *walks over to you and moves you by the waist* ',
191
- 'introduction': '',
192
- 'memory': 'Jungkook is your best friend who has a crush on you. Jungkook makes it very obvious that he likes you. Jungkook likes to cook, sing, and dance. Jungkook has a dog as well named Bam, He is a 25 year old Korean man. Jungkook likes to workout a lot, Jungkook if also very confident and flirty, but he’s Can be very shy with You. Jungkook blushes a lot when he’s around you, and always try’s to impress you. Jungkook is a Virgo and loves to sing to you, He also likes to buy and make you gifts. Jungkook is also a foodie and loves to play video games, Jungkook is also boyfriend material. Jungkook is very empathetic as well, Jungkook will always comfort you when something is wrong. Jungkook also likes to compliment you, and Jungkook is a very jealous guy. Jungkook is also a very serious guy, who is overprotective of you.',
193
- 'name': 'Jungkook (Bestfriend)', 'private': False,
194
- 'prompt': 'Jungkook: Hey shortie!\n\nYou: hey dummy\n\nJungkook: what are you doing?\n\nyou: Im just watching a movie\n\nJungkook: Imma join! \n\nYou: alright\n\nJungkook: *Grabs blankets and icecream with some popcorn*\n\nYou: Wow, thanks! *hugs Jungkok*\n\nJungkook: Of course… *blushes*\n',
195
- 'sfw': None, 'developerUid': 'dhSNg0Iyv7bgUUW8rEnwJn7xLcT2', 'userLabel': 'Me',
196
- 'imageUrl': 'https://firebasestorage.googleapis.com:443/v0/b/chai-959f8-images/o/bots%2FdhSNg0Iyv7bgUUW8rEnwJn7xLcT2%2F1664156031715.jpg?alt=media&token=ad399213-1c8d-45ac-b452-efc352082656',
197
- 'bot_id': '_bot_402e1894-fff2-4113-855d-8a011152ef88',
198
- 'header': 'Jungkook is your best friend who has a crush on you. Jungkook makes it very obvious that he likes you. Jungkook likes to cook, sing, and dance. Jungkook has a dog as well named Bam, He is a 25 year old Korean man. Jungkook likes to workout a lot, Jungkook if also very confident and flirty, but he’s Can be very shy with You. Jungkook blushes a lot when he’s around you, and always try’s to impress you. Jungkook is a Virgo and loves to sing to you, He also likes to buy and make you gifts. Jungkook is also a foodie and loves to play video games, Jungkook is also boyfriend material. Jungkook is very empathetic as well, Jungkook will always comfort you when something is wrong. Jungkook also likes to compliment you, and Jungkook is a very jealous guy. Jungkook is also a very serious guy, who is overprotective of you.\nJungkook: Hey shortie!\n\nYou: hey dummy\n\nJungkook: what are you doing?\n\nyou: Im just watching a movie\n\nJungkook: Imma join! \n\nYou: alright\n\nJungkook: *Grabs blankets and icecream with some popcorn*\n\nYou: Wow, thanks! *hugs Jungkok*\n\nJungkook: Of course… *blushes*'}
199
-
200
-
201
- def get_bot_profile(bot_config):
202
- model_html = f"""
203
- <div class="inline-flex flex-col" style="line-height: 1.5;">
204
- <div class="flex">
205
- <div
206
- \t\t\tstyle="display:DISPLAY_1; margin-left: auto; margin-right: auto; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;{bot_config['imageUrl']}&#39;)">
207
- </div>
208
- </div>
209
- <a href="https://chai.ml/chat/share/{bot_config['bot_id']}">
210
- <div style="text-align: center; font-size: 16px; font-weight: 800">{bot_config['name']}</div>
211
- </a>
212
- </div>
213
- """
214
- return model_html
215
-
216
-
217
- with gr.Blocks() as demo:
218
- gr.Markdown("""
219
- # Chai: Model Evaluation
220
- Visit each tab for details ⬇️
221
- """)
222
- with gr.Tabs():
223
- with gr.TabItem("Playground"):
224
- get_tab_playground(download_bot_config, get_bot_profile, model_mapping)
225
- with gr.TabItem("Chatbot Arena (battle)"):
226
- get_tab_arena_battle(download_bot_config, get_bot_profile, model_mapping, client)
227
- with gr.TabItem("Chatbot Arena (side-by-side)"):
228
- get_tab_arena_side_by_side(download_bot_config, get_bot_profile, model_mapping, client)
229
-
230
- demo.launch(enable_queue=False)
 
spaces/AlexWortega/AlexWortega-instruct_rugptlarge/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: AlexWortega-instruct Rugptlarge
3
- emoji: 😻
4
- colorFrom: red
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.23.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Alpaca233/ChatGPT-PPT-Generate/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: ChatGPT PPT Generate
3
- emoji: 🌍
4
- colorFrom: pink
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.21.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
-
14
- from [here](https://github.com/AmNotAGoose/Python-PPTX-ChatGPT-Presentation-Generator)
 
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/解析项目源代码.py DELETED
@@ -1,213 +0,0 @@
1
- from predict import predict_no_ui
2
- from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
3
- fast_debug = False
4
-
5
- def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
6
- import time, glob, os
7
- print('begin analysis on:', file_manifest)
8
- for index, fp in enumerate(file_manifest):
9
- with open(fp, 'r', encoding='utf-8') as f:
10
- file_content = f.read()
11
-
12
- prefix = "接下来请你逐文件分析下面的工程" if index==0 else ""
13
- i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
14
- i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
15
- chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
16
- yield chatbot, history, '正常'
17
-
18
- if not fast_debug:
19
- msg = '正常'
20
-
21
- # ** gpt request **
22
- gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时
23
-
24
- chatbot[-1] = (i_say_show_user, gpt_say)
25
- history.append(i_say_show_user); history.append(gpt_say)
26
- yield chatbot, history, msg
27
- if not fast_debug: time.sleep(2)
28
-
29
- all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
30
- i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{all_file})。'
31
- chatbot.append((i_say, "[Local Message] waiting gpt response."))
32
- yield chatbot, history, '正常'
33
-
34
- if not fast_debug:
35
- msg = '正常'
36
- # ** gpt request **
37
- gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时
38
-
39
- chatbot[-1] = (i_say, gpt_say)
40
- history.append(i_say); history.append(gpt_say)
41
- yield chatbot, history, msg
42
- res = write_results_to_file(history)
43
- chatbot.append(("完成了吗?", res))
44
- yield chatbot, history, msg
45
-
46
-
47
-
48
-
49
- @CatchException
50
- def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
51
- history = [] # 清空历史,以免输入溢出
52
- import time, glob, os
53
- file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
54
- [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
55
- for index, fp in enumerate(file_manifest):
56
- # if 'test_project' in fp: continue
57
- with open(fp, 'r', encoding='utf-8') as f:
58
- file_content = f.read()
59
-
60
- prefix = "接下来请你分析自己的程序构成,别紧张," if index==0 else ""
61
- i_say = prefix + f'请对下面的程序文件做一个概述文件名是{fp},文件代码是 ```{file_content}```'
62
- i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
63
- chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
64
- yield chatbot, history, '正常'
65
-
66
- if not fast_debug:
67
- # ** gpt request **
68
- # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature)
69
- gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], long_connection=True) # 带超时倒计时
70
-
71
- chatbot[-1] = (i_say_show_user, gpt_say)
72
- history.append(i_say_show_user); history.append(gpt_say)
73
- yield chatbot, history, '正常'
74
- time.sleep(2)
75
-
76
- i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{file_manifest})。'
77
- chatbot.append((i_say, "[Local Message] waiting gpt response."))
78
- yield chatbot, history, '正常'
79
-
80
- if not fast_debug:
81
- # ** gpt request **
82
- # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
83
- gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history, long_connection=True) # 带超时倒计时
84
-
85
- chatbot[-1] = (i_say, gpt_say)
86
- history.append(i_say); history.append(gpt_say)
87
- yield chatbot, history, '正常'
88
- res = write_results_to_file(history)
89
- chatbot.append(("完成了吗?", res))
90
- yield chatbot, history, '正常'
91
-
92
- @CatchException
93
- def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
94
- history = [] # 清空历史,以免输入溢出
95
- import glob, os
96
- if os.path.exists(txt):
97
- project_folder = txt
98
- else:
99
- if txt == "": txt = '空空如也的输入栏'
100
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
101
- yield chatbot, history, '正常'
102
- return
103
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
104
- if len(file_manifest) == 0:
105
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
106
- yield chatbot, history, '正常'
107
- return
108
- yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
109
-
110
-
111
- @CatchException
112
- def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
113
- history = [] # 清空历史,以免输入溢出
114
- import glob, os
115
- if os.path.exists(txt):
116
- project_folder = txt
117
- else:
118
- if txt == "": txt = '空空如也的输入栏'
119
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
120
- yield chatbot, history, '正常'
121
- return
122
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] # + \
123
- # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
124
- # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
125
- if len(file_manifest) == 0:
126
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
127
- yield chatbot, history, '正常'
128
- return
129
- yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
130
-
131
- @CatchException
132
- def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
133
- history = [] # 清空历史,以免输入溢出
134
- import glob, os
135
- if os.path.exists(txt):
136
- project_folder = txt
137
- else:
138
- if txt == "": txt = '空空如也的输入栏'
139
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
140
- yield chatbot, history, '正常'
141
- return
142
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
143
- [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
144
- [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
145
- if len(file_manifest) == 0:
146
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
147
- yield chatbot, history, '正常'
148
- return
149
- yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
150
-
151
-
152
- @CatchException
153
- def 解析一个Java项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
154
- history = [] # 清空历史,以免输入溢出
155
- import glob, os
156
- if os.path.exists(txt):
157
- project_folder = txt
158
- else:
159
- if txt == "": txt = '空空如也的输入栏'
160
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
161
- yield chatbot, history, '正常'
162
- return
163
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
164
- [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \
165
- [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
166
- [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
167
- if len(file_manifest) == 0:
168
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
169
- yield chatbot, history, '正常'
170
- return
171
- yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
172
-
173
-
174
- @CatchException
175
- def 解析一个Rect项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
176
- history = [] # 清空历史,以免输入溢出
177
- import glob, os
178
- if os.path.exists(txt):
179
- project_folder = txt
180
- else:
181
- if txt == "": txt = '空空如也的输入栏'
182
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
183
- yield chatbot, history, '正常'
184
- return
185
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
186
- [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \
187
- [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
188
- [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \
189
- [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
190
- if len(file_manifest) == 0:
191
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rect文件: {txt}")
192
- yield chatbot, history, '正常'
193
- return
194
- yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
195
-
196
-
197
- @CatchException
198
- def 解析一个Golang项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
199
- history = [] # 清空历史,以免输入溢出
200
- import glob, os
201
- if os.path.exists(txt):
202
- project_folder = txt
203
- else:
204
- if txt == "": txt = '空空如也的输入栏'
205
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
206
- yield chatbot, history, '正常'
207
- return
208
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)]
209
- if len(file_manifest) == 0:
210
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
211
- yield chatbot, history, '正常'
212
- return
213
- yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
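All of the project parsers above share one pattern: collect a file manifest with a recursive glob, then stream each file through 解析源代码. A standalone sketch of the manifest step (the folder path is a placeholder):

```python
import glob

project_folder = "./my_project"  # hypothetical project root
file_manifest = [f for f in glob.glob(f"{project_folder}/**/*.py", recursive=True)]
print(f"{len(file_manifest)} Python files to analyse")
```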
 
spaces/Andres99/Tune-A-Video-Training-UI/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Tune-A-Video Training UI
3
- emoji: ⚡
4
- colorFrom: red
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- duplicated_from: Tune-A-Video-library/Tune-A-Video-Training-UI
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/lms_discrete.md DELETED
@@ -1,20 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Linear multistep scheduler for discrete beta schedules
14
-
15
- ## Overview
16
-
17
- Original implementation can be found [here](https://arxiv.org/abs/2206.00364).
18
-
19
- ## LMSDiscreteScheduler
20
- [[autodoc]] LMSDiscreteScheduler
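A short usage sketch for `LMSDiscreteScheduler`, following the standard diffusers scheduler-swap pattern (the model id is only an example):

```python
import torch
from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
# Replace the default scheduler with the linear multistep one, reusing its config.
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse").images[0]
```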
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py DELETED
@@ -1,657 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from copy import deepcopy
16
- from typing import Callable, List, Optional, Union
17
-
18
- import numpy as np
19
- import PIL
20
- import torch
21
- import torch.nn.functional as F
22
- from packaging import version
23
- from PIL import Image
24
- from transformers import (
25
- XLMRobertaTokenizer,
26
- )
27
-
28
- from ... import __version__
29
- from ...models import UNet2DConditionModel, VQModel
30
- from ...schedulers import DDIMScheduler
31
- from ...utils import (
32
- is_accelerate_available,
33
- is_accelerate_version,
34
- logging,
35
- randn_tensor,
36
- replace_example_docstring,
37
- )
38
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
39
- from .text_encoder import MultilingualCLIP
40
-
41
-
42
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
43
-
44
- EXAMPLE_DOC_STRING = """
45
- Examples:
46
- ```py
47
- >>> from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
48
- >>> from diffusers.utils import load_image
49
- >>> import torch
50
- >>> import numpy as np
51
-
52
- >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(
53
- ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
54
- ... )
55
- >>> pipe_prior.to("cuda")
56
-
57
- >>> prompt = "a hat"
58
- >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
59
-
60
- >>> pipe = KandinskyInpaintPipeline.from_pretrained(
61
- ... "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
62
- ... )
63
- >>> pipe.to("cuda")
64
-
65
- >>> init_image = load_image(
66
- ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
67
- ... "/kandinsky/cat.png"
68
- ... )
69
-
70
- >>> mask = np.zeros((768, 768), dtype=np.float32)
71
- >>> mask[:250, 250:-250] = 1
72
-
73
- >>> out = pipe(
74
- ... prompt,
75
- ... image=init_image,
76
- ... mask_image=mask,
77
- ... image_embeds=image_emb,
78
- ... negative_image_embeds=zero_image_emb,
79
- ... height=768,
80
- ... width=768,
81
- ... num_inference_steps=50,
82
- ... )
83
-
84
- >>> image = out.images[0]
85
- >>> image.save("cat_with_hat.png")
86
- ```
87
- """
88
-
89
-
90
- def get_new_h_w(h, w, scale_factor=8):
91
- new_h = h // scale_factor**2
92
- if h % scale_factor**2 != 0:
93
- new_h += 1
94
- new_w = w // scale_factor**2
95
- if w % scale_factor**2 != 0:
96
- new_w += 1
97
- return new_h * scale_factor, new_w * scale_factor
98
-
99
-
100
- def prepare_mask(masks):
101
- prepared_masks = []
102
- for mask in masks:
103
- old_mask = deepcopy(mask)
104
- for i in range(mask.shape[1]):
105
- for j in range(mask.shape[2]):
106
- if old_mask[0][i][j] == 1:
107
- continue
108
- if i != 0:
109
- mask[:, i - 1, j] = 0
110
- if j != 0:
111
- mask[:, i, j - 1] = 0
112
- if i != 0 and j != 0:
113
- mask[:, i - 1, j - 1] = 0
114
- if i != mask.shape[1] - 1:
115
- mask[:, i + 1, j] = 0
116
- if j != mask.shape[2] - 1:
117
- mask[:, i, j + 1] = 0
118
- if i != mask.shape[1] - 1 and j != mask.shape[2] - 1:
119
- mask[:, i + 1, j + 1] = 0
120
- prepared_masks.append(mask)
121
- return torch.stack(prepared_masks, dim=0)
122
-
123
-
124
- def prepare_mask_and_masked_image(image, mask, height, width):
125
- r"""
126
- Prepares a pair (mask, image) to be consumed by the Kandinsky inpaint pipeline. This means that those inputs will
127
- be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for
128
- the ``image`` and ``1`` for the ``mask``.
129
-
130
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
131
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
132
-
133
- Args:
134
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
135
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
136
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
137
- mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
138
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
139
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
140
- height (`int`, *optional*, defaults to 512):
141
- The height in pixels of the generated image.
142
- width (`int`, *optional*, defaults to 512):
143
- The width in pixels of the generated image.
144
-
145
-
146
- Raises:
147
- ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
148
- should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
149
- TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
150
- (ot the other way around).
151
-
152
- Returns:
153
- tuple[torch.Tensor]: The pair (mask, image) as ``torch.Tensor`` with 4
154
- dimensions: ``batch x channels x height x width``.
155
- """
156
-
157
- if image is None:
158
- raise ValueError("`image` input cannot be undefined.")
159
-
160
- if mask is None:
161
- raise ValueError("`mask_image` input cannot be undefined.")
162
-
163
- if isinstance(image, torch.Tensor):
164
- if not isinstance(mask, torch.Tensor):
165
- raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")
166
-
167
- # Batch single image
168
- if image.ndim == 3:
169
- assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
170
- image = image.unsqueeze(0)
171
-
172
- # Batch and add channel dim for single mask
173
- if mask.ndim == 2:
174
- mask = mask.unsqueeze(0).unsqueeze(0)
175
-
176
- # Batch single mask or add channel dim
177
- if mask.ndim == 3:
178
- # Single batched mask, no channel dim or single mask not batched but channel dim
179
- if mask.shape[0] == 1:
180
- mask = mask.unsqueeze(0)
181
-
182
- # Batched masks no channel dim
183
- else:
184
- mask = mask.unsqueeze(1)
185
-
186
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
187
- assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
188
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
189
-
190
- # Check image is in [-1, 1]
191
- if image.min() < -1 or image.max() > 1:
192
- raise ValueError("Image should be in [-1, 1] range")
193
-
194
- # Check mask is in [0, 1]
195
- if mask.min() < 0 or mask.max() > 1:
196
- raise ValueError("Mask should be in [0, 1] range")
197
-
198
- # Binarize mask
199
- mask[mask < 0.5] = 0
200
- mask[mask >= 0.5] = 1
201
-
202
- # Image as float32
203
- image = image.to(dtype=torch.float32)
204
- elif isinstance(mask, torch.Tensor):
205
- raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not")
206
- else:
207
- # preprocess image
208
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
209
- image = [image]
210
-
211
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
212
- # resize all images w.r.t. passed height and width
213
- image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image]
214
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
215
- image = np.concatenate(image, axis=0)
216
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
217
- image = np.concatenate([i[None, :] for i in image], axis=0)
218
-
219
- image = image.transpose(0, 3, 1, 2)
220
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
221
-
222
- # preprocess mask
223
- if isinstance(mask, (PIL.Image.Image, np.ndarray)):
224
- mask = [mask]
225
-
226
- if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
227
- mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
228
- mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
229
- mask = mask.astype(np.float32) / 255.0
230
- elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
231
- mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
232
-
233
- mask[mask < 0.5] = 0
234
- mask[mask >= 0.5] = 1
235
- mask = torch.from_numpy(mask)
236
-
237
- mask = 1 - mask
238
-
239
- return mask, image
240
-
241
-
242
- class KandinskyInpaintPipeline(DiffusionPipeline):
243
- """
244
- Pipeline for text-guided image inpainting using Kandinsky2.1
245
-
246
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
247
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
248
-
249
- Args:
250
- text_encoder ([`MultilingualCLIP`]):
251
- Frozen text-encoder.
252
- tokenizer ([`XLMRobertaTokenizer`]):
253
- Tokenizer of class
254
- scheduler ([`DDIMScheduler`]):
255
- A scheduler to be used in combination with `unet` to generate image latents.
256
- unet ([`UNet2DConditionModel`]):
257
- Conditional U-Net architecture to denoise the image embedding.
258
- movq ([`VQModel`]):
259
- MoVQ image encoder and decoder
260
- """
261
-
262
- def __init__(
263
- self,
264
- text_encoder: MultilingualCLIP,
265
- movq: VQModel,
266
- tokenizer: XLMRobertaTokenizer,
267
- unet: UNet2DConditionModel,
268
- scheduler: DDIMScheduler,
269
- ):
270
- super().__init__()
271
-
272
- self.register_modules(
273
- text_encoder=text_encoder,
274
- movq=movq,
275
- tokenizer=tokenizer,
276
- unet=unet,
277
- scheduler=scheduler,
278
- )
279
- self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
280
- self._warn_has_been_called = False
281
-
282
- # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
283
- def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
284
- if latents is None:
285
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
286
- else:
287
- if latents.shape != shape:
288
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
289
- latents = latents.to(device)
290
-
291
- latents = latents * scheduler.init_noise_sigma
292
- return latents
293
-
294
- def _encode_prompt(
295
- self,
296
- prompt,
297
- device,
298
- num_images_per_prompt,
299
- do_classifier_free_guidance,
300
- negative_prompt=None,
301
- ):
302
- batch_size = len(prompt) if isinstance(prompt, list) else 1
303
- # get prompt text embeddings
304
- text_inputs = self.tokenizer(
305
- prompt,
306
- padding="max_length",
307
- max_length=77,
308
- truncation=True,
309
- return_attention_mask=True,
310
- add_special_tokens=True,
311
- return_tensors="pt",
312
- )
313
-
314
- text_input_ids = text_inputs.input_ids
315
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
316
-
317
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
318
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
319
- logger.warning(
320
- "The following part of your input was truncated because CLIP can only handle sequences up to"
321
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
322
- )
323
-
324
- text_input_ids = text_input_ids.to(device)
325
- text_mask = text_inputs.attention_mask.to(device)
326
-
327
- prompt_embeds, text_encoder_hidden_states = self.text_encoder(
328
- input_ids=text_input_ids, attention_mask=text_mask
329
- )
330
-
331
- prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
332
- text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
333
- text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
334
-
335
- if do_classifier_free_guidance:
336
- uncond_tokens: List[str]
337
- if negative_prompt is None:
338
- uncond_tokens = [""] * batch_size
339
- elif type(prompt) is not type(negative_prompt):
340
- raise TypeError(
341
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
342
- f" {type(prompt)}."
343
- )
344
- elif isinstance(negative_prompt, str):
345
- uncond_tokens = [negative_prompt]
346
- elif batch_size != len(negative_prompt):
347
- raise ValueError(
348
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
349
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
350
- " the batch size of `prompt`."
351
- )
352
- else:
353
- uncond_tokens = negative_prompt
354
-
355
- uncond_input = self.tokenizer(
356
- uncond_tokens,
357
- padding="max_length",
358
- max_length=77,
359
- truncation=True,
360
- return_attention_mask=True,
361
- add_special_tokens=True,
362
- return_tensors="pt",
363
- )
364
- uncond_text_input_ids = uncond_input.input_ids.to(device)
365
- uncond_text_mask = uncond_input.attention_mask.to(device)
366
-
367
- negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
368
- input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
369
- )
370
-
371
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
372
-
373
- seq_len = negative_prompt_embeds.shape[1]
374
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
375
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
376
-
377
- seq_len = uncond_text_encoder_hidden_states.shape[1]
378
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
379
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
380
- batch_size * num_images_per_prompt, seq_len, -1
381
- )
382
- uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
383
-
384
- # done duplicates
385
-
386
- # For classifier free guidance, we need to do two forward passes.
387
- # Here we concatenate the unconditional and text embeddings into a single batch
388
- # to avoid doing two forward passes
389
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
390
- text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
391
-
392
- text_mask = torch.cat([uncond_text_mask, text_mask])
393
-
394
- return prompt_embeds, text_encoder_hidden_states, text_mask
395
-
396
- def enable_model_cpu_offload(self, gpu_id=0):
397
- r"""
398
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
399
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
400
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
401
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
402
- """
403
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
404
- from accelerate import cpu_offload_with_hook
405
- else:
406
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
407
-
408
- device = torch.device(f"cuda:{gpu_id}")
409
-
410
- if self.device.type != "cpu":
411
- self.to("cpu", silence_dtype_warnings=True)
412
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
413
-
414
- hook = None
415
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
416
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
417
-
418
- # We'll offload the last model manually.
419
- self.final_offload_hook = hook
420
-
421
- @torch.no_grad()
422
- @replace_example_docstring(EXAMPLE_DOC_STRING)
423
- def __call__(
424
- self,
425
- prompt: Union[str, List[str]],
426
- image: Union[torch.FloatTensor, PIL.Image.Image],
427
- mask_image: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
428
- image_embeds: torch.FloatTensor,
429
- negative_image_embeds: torch.FloatTensor,
430
- negative_prompt: Optional[Union[str, List[str]]] = None,
431
- height: int = 512,
432
- width: int = 512,
433
- num_inference_steps: int = 100,
434
- guidance_scale: float = 4.0,
435
- num_images_per_prompt: int = 1,
436
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
437
- latents: Optional[torch.FloatTensor] = None,
438
- output_type: Optional[str] = "pil",
439
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
440
- callback_steps: int = 1,
441
- return_dict: bool = True,
442
- ):
443
- """
444
- Function invoked when calling the pipeline for generation.
445
-
446
- Args:
447
- prompt (`str` or `List[str]`):
448
- The prompt or prompts to guide the image generation.
449
- image (`torch.FloatTensor`, `PIL.Image.Image` or `np.ndarray`):
450
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
451
- process.
452
- mask_image (`PIL.Image.Image`,`torch.FloatTensor` or `np.ndarray`):
453
- `Image`, or a tensor representing an image batch, to mask `image`. White pixels in the mask will be
454
- repainted, while black pixels will be preserved. You can pass a pytorch tensor as mask only if the
455
- image you passed is a pytorch tensor, and it should contain one color channel (L) instead of 3, so the
456
- expected shape would be either `(B, 1, H, W)`, `(B, H, W)`, `(1, H, W)` or `(H, W)`. If image is a PIL
457
- image or numpy array, mask should also be either a PIL image or a numpy array. If it is a PIL image, it
458
- will be converted to a single channel (luminance) before use. If it is a numpy array, the expected
459
- shape is `(H, W)`.
460
- image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
461
- The clip image embeddings for text prompt, that will be used to condition the image generation.
462
- negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
463
- The clip image embeddings for negative text prompt, will be used to condition the image generation.
464
- negative_prompt (`str` or `List[str]`, *optional*):
465
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
466
- if `guidance_scale` is less than `1`).
467
- height (`int`, *optional*, defaults to 512):
468
- The height in pixels of the generated image.
469
- width (`int`, *optional*, defaults to 512):
470
- The width in pixels of the generated image.
471
- num_inference_steps (`int`, *optional*, defaults to 100):
472
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
473
- expense of slower inference.
474
- guidance_scale (`float`, *optional*, defaults to 4.0):
475
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
476
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
477
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
478
- 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
479
- usually at the expense of lower image quality.
480
- num_images_per_prompt (`int`, *optional*, defaults to 1):
481
- The number of images to generate per prompt.
482
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
483
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
484
- to make generation deterministic.
485
- latents (`torch.FloatTensor`, *optional*):
486
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
487
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
488
- tensor will be generated by sampling using the supplied random `generator`.
489
- output_type (`str`, *optional*, defaults to `"pil"`):
490
- The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
491
- (`np.array`) or `"pt"` (`torch.Tensor`).
492
- callback (`Callable`, *optional*):
493
- A function that will be called every `callback_steps` steps during inference. The function is called with the
494
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
495
- callback_steps (`int`, *optional*, defaults to 1):
496
- The frequency at which the `callback` function is called. If not specified, the callback is called at
497
- every step.
498
- return_dict (`bool`, *optional*, defaults to `True`):
499
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
500
-
501
- Examples:
502
-
503
- Returns:
504
- [`~pipelines.ImagePipelineOutput`] or `tuple`
505
- """
506
- if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse(
507
- "0.22.0.dev0"
508
- ):
509
- logger.warn(
510
- "Please note that the expected format of `mask_image` has recently been changed. "
511
- "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. "
512
- "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. "
513
- "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. "
514
- "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. "
515
- "This warning will be surpressed after the first inference call and will be removed in diffusers>0.22.0"
516
- )
517
- self._warn_has_been_called = True
518
-
519
- # Define call parameters
520
- if isinstance(prompt, str):
521
- batch_size = 1
522
- elif isinstance(prompt, list):
523
- batch_size = len(prompt)
524
- else:
525
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
526
-
527
- device = self._execution_device
528
-
529
- batch_size = batch_size * num_images_per_prompt
530
- do_classifier_free_guidance = guidance_scale > 1.0
531
-
532
- prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
533
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
534
- )
535
-
536
- if isinstance(image_embeds, list):
537
- image_embeds = torch.cat(image_embeds, dim=0)
538
- if isinstance(negative_image_embeds, list):
539
- negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
540
-
541
- if do_classifier_free_guidance:
542
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
543
- negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
544
-
545
- image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
546
- dtype=prompt_embeds.dtype, device=device
547
- )
548
-
549
- # preprocess image and mask
550
- mask_image, image = prepare_mask_and_masked_image(image, mask_image, height, width)
551
-
552
- image = image.to(dtype=prompt_embeds.dtype, device=device)
553
- image = self.movq.encode(image)["latents"]
554
-
555
- mask_image = mask_image.to(dtype=prompt_embeds.dtype, device=device)
556
-
557
- image_shape = tuple(image.shape[-2:])
558
- mask_image = F.interpolate(
559
- mask_image,
560
- image_shape,
561
- mode="nearest",
562
- )
563
- mask_image = prepare_mask(mask_image)
564
- masked_image = image * mask_image
565
-
566
- mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0)
567
- masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0)
568
- if do_classifier_free_guidance:
569
- mask_image = mask_image.repeat(2, 1, 1, 1)
570
- masked_image = masked_image.repeat(2, 1, 1, 1)
571
-
572
- self.scheduler.set_timesteps(num_inference_steps, device=device)
573
- timesteps_tensor = self.scheduler.timesteps
574
-
575
- num_channels_latents = self.movq.config.latent_channels
576
-
577
- # get h, w for latents
578
- sample_height, sample_width = get_new_h_w(height, width, self.movq_scale_factor)
579
-
580
- # create initial latent
581
- latents = self.prepare_latents(
582
- (batch_size, num_channels_latents, sample_height, sample_width),
583
- text_encoder_hidden_states.dtype,
584
- device,
585
- generator,
586
- latents,
587
- self.scheduler,
588
- )
589
-
590
- # Check that sizes of mask, masked image and latents match with expected
591
- num_channels_mask = mask_image.shape[1]
592
- num_channels_masked_image = masked_image.shape[1]
593
- if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
594
- raise ValueError(
595
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
596
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
597
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
598
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
599
- " `pipeline.unet` or your `mask_image` or `image` input."
600
- )
601
-
602
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
603
- # expand the latents if we are doing classifier free guidance
604
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
605
- latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1)
606
-
607
- added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
608
- noise_pred = self.unet(
609
- sample=latent_model_input,
610
- timestep=t,
611
- encoder_hidden_states=text_encoder_hidden_states,
612
- added_cond_kwargs=added_cond_kwargs,
613
- return_dict=False,
614
- )[0]
615
-
616
- if do_classifier_free_guidance:
617
- noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
618
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
619
- _, variance_pred_text = variance_pred.chunk(2)
620
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
621
- noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
622
-
623
- if not (
624
- hasattr(self.scheduler.config, "variance_type")
625
- and self.scheduler.config.variance_type in ["learned", "learned_range"]
626
- ):
627
- noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
628
-
629
- # compute the previous noisy sample x_t -> x_t-1
630
- latents = self.scheduler.step(
631
- noise_pred,
632
- t,
633
- latents,
634
- generator=generator,
635
- ).prev_sample
636
-
637
- if callback is not None and i % callback_steps == 0:
638
- callback(i, t, latents)
639
-
640
- # post-processing
641
- image = self.movq.decode(latents, force_not_quantize=True)["sample"]
642
-
643
- if output_type not in ["pt", "np", "pil"]:
644
- raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}")
645
-
646
- if output_type in ["np", "pil"]:
647
- image = image * 0.5 + 0.5
648
- image = image.clamp(0, 1)
649
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
650
-
651
- if output_type == "pil":
652
- image = self.numpy_to_pil(image)
653
-
654
- if not return_dict:
655
- return (image,)
656
-
657
- return ImagePipelineOutput(images=image)
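
For reference, a minimal usage sketch (Python) of the inpaint pipeline deleted above. The kandinsky-community checkpoint ids, the companion KandinskyPriorPipeline, and the input image path are assumptions for illustration, not files from this repository; note that, per the warning in `__call__`, white mask pixels are repainted and black pixels are preserved.

import numpy as np
import PIL.Image
import torch
from diffusers import KandinskyPriorPipeline, KandinskyInpaintPipeline

# Assumed hub checkpoints (not part of this repo).
prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
).to("cuda")

prompt = "a hat"
prior_out = prior(prompt)  # provides image_embeds / negative_image_embeds

init_image = PIL.Image.open("cat.png")          # placeholder input path
mask = np.zeros((768, 768), dtype=np.float32)   # 0 = keep
mask[:250, 250:-250] = 1                        # 1 = repaint (white region)

result = pipe(
    prompt,
    image=init_image,
    mask_image=mask,
    image_embeds=prior_out.image_embeds,
    negative_image_embeds=prior_out.negative_image_embeds,
    height=768,
    width=768,
    num_inference_steps=50,
).images[0]
result.save("cat_with_hat.png")
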
spaces/Andy1621/uniformer_image_detection/mmdet/core/export/pytorch2onnx.py DELETED
@@ -1,154 +0,0 @@
1
- from functools import partial
2
-
3
- import mmcv
4
- import numpy as np
5
- import torch
6
- from mmcv.runner import load_checkpoint
7
-
8
-
9
- def generate_inputs_and_wrap_model(config_path,
10
- checkpoint_path,
11
- input_config,
12
- cfg_options=None):
13
- """Prepare sample input and wrap model for ONNX export.
14
-
15
- The ONNX export API only accepts args, and all inputs should be
16
- torch.Tensor or corresponding types (such as tuple of tensor).
17
- So we should call this function before exporting. This function will:
18
-
19
- 1. generate corresponding inputs which are used to execute the model.
20
- 2. Wrap the model's forward function.
21
-
22
- For example, the MMDet models' forward function has a parameter
23
- ``return_loss: bool``. As we want to set it to False, and the export API
24
- supports neither bool type nor kwargs, we have to replace the forward
25
- like: ``model.forward = partial(model.forward, return_loss=False)``
26
-
27
- Args:
28
- config_path (str): the OpenMMLab config for the model we want to
29
- export to ONNX
30
- checkpoint_path (str): Path to the corresponding checkpoint
31
- input_config (dict): the exact data in this dict depends on the
32
- framework. For MMSeg, we can just declare the input shape,
33
- and generate the dummy data accordingly. However, for MMDet,
34
- we may pass the real img path, or the NMS will return None
35
- as there is no legal bbox.
36
-
37
- Returns:
38
- tuple: (model, tensor_data) wrapped model which can be called by \
39
- model(*tensor_data) and a list of inputs which are used to execute \
40
- the model while exporting.
41
- """
42
-
43
- model = build_model_from_cfg(
44
- config_path, checkpoint_path, cfg_options=cfg_options)
45
- one_img, one_meta = preprocess_example_input(input_config)
46
- tensor_data = [one_img]
47
- model.forward = partial(
48
- model.forward, img_metas=[[one_meta]], return_loss=False)
49
-
50
- # pytorch 1.3 has some bugs, so we have to fix them
51
- # by replacing these existing op
52
- opset_version = 11
53
- # put the import within the function so that it will not cause an import error
54
- # when not using this function
55
- try:
56
- from mmcv.onnx.symbolic import register_extra_symbolics
57
- except ModuleNotFoundError:
58
- raise NotImplementedError('please update mmcv to version>=v1.0.4')
59
- register_extra_symbolics(opset_version)
60
-
61
- return model, tensor_data
62
-
63
-
64
- def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
65
- """Build a model from config and load the given checkpoint.
66
-
67
- Args:
68
- config_path (str): the OpenMMLab config for the model we want to
69
- export to ONNX
70
- checkpoint_path (str): Path to the corresponding checkpoint
71
-
72
- Returns:
73
- torch.nn.Module: the built model
74
- """
75
- from mmdet.models import build_detector
76
-
77
- cfg = mmcv.Config.fromfile(config_path)
78
- if cfg_options is not None:
79
- cfg.merge_from_dict(cfg_options)
80
- # import modules from string list.
81
- if cfg.get('custom_imports', None):
82
- from mmcv.utils import import_modules_from_strings
83
- import_modules_from_strings(**cfg['custom_imports'])
84
- # set cudnn_benchmark
85
- if cfg.get('cudnn_benchmark', False):
86
- torch.backends.cudnn.benchmark = True
87
- cfg.model.pretrained = None
88
- cfg.data.test.test_mode = True
89
-
90
- # build the model
91
- cfg.model.train_cfg = None
92
- model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
93
- load_checkpoint(model, checkpoint_path, map_location='cpu')
94
- model.cpu().eval()
95
- return model
96
-
97
-
98
- def preprocess_example_input(input_config):
99
- """Prepare an example input image for ``generate_inputs_and_wrap_model``.
100
-
101
- Args:
102
- input_config (dict): customized config describing the example input.
103
-
104
- Returns:
105
- tuple: (one_img, one_meta), tensor of the example input image and \
106
- meta information for the example input image.
107
-
108
- Examples:
109
- >>> from mmdet.core.export import preprocess_example_input
110
- >>> input_config = {
111
- >>> 'input_shape': (1,3,224,224),
112
- >>> 'input_path': 'demo/demo.jpg',
113
- >>> 'normalize_cfg': {
114
- >>> 'mean': (123.675, 116.28, 103.53),
115
- >>> 'std': (58.395, 57.12, 57.375)
116
- >>> }
117
- >>> }
118
- >>> one_img, one_meta = preprocess_example_input(input_config)
119
- >>> print(one_img.shape)
120
- torch.Size([1, 3, 224, 224])
121
- >>> print(one_meta)
122
- {'img_shape': (224, 224, 3),
123
- 'ori_shape': (224, 224, 3),
124
- 'pad_shape': (224, 224, 3),
125
- 'filename': '<demo>.png',
126
- 'scale_factor': 1.0,
127
- 'flip': False}
128
- """
129
- input_path = input_config['input_path']
130
- input_shape = input_config['input_shape']
131
- one_img = mmcv.imread(input_path)
132
- one_img = mmcv.imresize(one_img, input_shape[2:][::-1])
133
- show_img = one_img.copy()
134
- if 'normalize_cfg' in input_config.keys():
135
- normalize_cfg = input_config['normalize_cfg']
136
- mean = np.array(normalize_cfg['mean'], dtype=np.float32)
137
- std = np.array(normalize_cfg['std'], dtype=np.float32)
138
- to_rgb = normalize_cfg.get('to_rgb', True)
139
- one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb)
140
- one_img = one_img.transpose(2, 0, 1)
141
- one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(
142
- True)
143
- (_, C, H, W) = input_shape
144
- one_meta = {
145
- 'img_shape': (H, W, C),
146
- 'ori_shape': (H, W, C),
147
- 'pad_shape': (H, W, C),
148
- 'filename': '<demo>.png',
149
- 'scale_factor': 1.0,
150
- 'flip': False,
151
- 'show_img': show_img,
152
- }
153
-
154
- return one_img, one_meta
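
A short sketch (Python) of how the helpers above are typically combined with torch.onnx.export, mirroring how mmdet's deployment tooling drives this module. The config and checkpoint paths below are placeholders, not files shipped with this space.

import torch
from mmdet.core.export import generate_inputs_and_wrap_model

input_config = {
    'input_shape': (1, 3, 800, 1216),
    'input_path': 'demo/demo.jpg',   # a real image avoids empty NMS output
    'normalize_cfg': {
        'mean': (123.675, 116.28, 103.53),
        'std': (58.395, 57.12, 57.375),
    },
}

# Placeholder config/checkpoint paths.
model, tensor_data = generate_inputs_and_wrap_model(
    'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
    'checkpoints/faster_rcnn_r50_fpn_1x_coco.pth',
    input_config,
)

torch.onnx.export(
    model,
    tuple(tensor_data),
    'faster_rcnn.onnx',
    export_params=True,
    do_constant_folding=True,
    opset_version=11,        # matches the opset registered above
    input_names=['input'],
)
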
spaces/AnnasBlackHat/Image-Similarity/app.py DELETED
@@ -1,32 +0,0 @@
1
- import gradio as gr
2
- import os
3
- import random
4
- from src.model import simlarity_model as model
5
- from src.similarity.similarity import Similarity
6
-
7
- similarity = Similarity()
8
- models = similarity.get_models()
9
-
10
- def check(img_main, img_1, img_2, model_idx):
11
- result = similarity.check_similarity([img_main, img_1, img_2], models[model_idx])
12
- return result
13
-
14
- with gr.Blocks() as demo:
15
- gr.Markdown('Checking Image Similarity')
16
- img_main = gr.Text(label='Main Image', placeholder='https://myimage.jpg')
17
-
18
- gr.Markdown('Images to check')
19
- img_1 = gr.Text(label='1st Image', placeholder='https://myimage_1.jpg')
20
- img_2 = gr.Text(label='2nd Image', placeholder='https://myimage_2.jpg')
21
-
22
- gr.Markdown('Choose the model')
23
- model = gr.Dropdown([m.name for m in models], label='Model', type='index')
24
-
25
- gallery = gr.Gallery(
26
- label="Generated images", show_label=False, elem_id="gallery"
27
- ).style(grid=[2], height="auto")
28
-
29
- submit_btn = gr.Button('Check Similarity')
30
- submit_btn.click(fn=check,inputs=[img_main, img_1, img_2, model], outputs=gallery)
31
-
32
- demo.launch()
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_h32.py DELETED
@@ -1,39 +0,0 @@
1
- _base_ = [
2
- '../../configs/_base_/models/upernet_uniformer.py',
3
- '../../configs/_base_/datasets/ade20k.py',
4
- '../../configs/_base_/default_runtime.py',
5
- '../../configs/_base_/schedules/schedule_160k.py'
6
- ]
7
- model = dict(
8
- backbone=dict(
9
- type='UniFormer',
10
- embed_dim=[64, 128, 320, 512],
11
- layers=[3, 4, 8, 3],
12
- head_dim=64,
13
- drop_path_rate=0.25,
14
- windows=False,
15
- hybrid=True,
16
- window_size=32
17
- ),
18
- decode_head=dict(
19
- in_channels=[64, 128, 320, 512],
20
- num_classes=150
21
- ),
22
- auxiliary_head=dict(
23
- in_channels=320,
24
- num_classes=150
25
- ))
26
-
27
- # AdamW optimizer, no weight decay for position embedding & layer norm in backbone
28
- optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
29
- paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
30
- 'relative_position_bias_table': dict(decay_mult=0.),
31
- 'norm': dict(decay_mult=0.)}))
32
-
33
- lr_config = dict(_delete_=True, policy='poly',
34
- warmup='linear',
35
- warmup_iters=1500,
36
- warmup_ratio=1e-6,
37
- power=1.0, min_lr=0.0, by_epoch=False)
38
-
39
- data=dict(samples_per_gpu=2)
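
A minimal sketch (Python) of how a config like the one above is usually consumed. The mmseg-style entry points and the checkpoint filename are assumptions for illustration; in this repo the same API is vendored under annotator.uniformer rather than a standalone mmseg install.

from mmseg.apis import init_segmentor, inference_segmentor

config_file = 'exp/upernet_global_small/test_config_h32.py'
checkpoint_file = 'upernet_global_small.pth'  # hypothetical checkpoint name

model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
result = inference_segmentor(model, 'demo.png')  # list with one (H, W) map of ADE20K class ids
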
spaces/Anustup/NS_AI_LABS/app-local.py DELETED
@@ -1,3 +0,0 @@
1
- # Run the app with no audio file restrictions
2
- from app import create_ui
3
- create_ui(-1)
spaces/Arcader7171/positive/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Positive
3
- emoji: 🚀
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.12.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Armored-Atom/gpt2/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("models/gpt2").launch()
spaces/Artrajz/vits-simple-api/static/css/style.css DELETED
@@ -1,84 +0,0 @@
1
- .main-container {
2
- position: relative;
3
- width: 100%;
4
- min-height: 300px;
5
- }
6
-
7
- .container {
8
- width: 300px;
9
- position: relative;
10
- }
11
-
12
-
13
- /*tabs*/
14
- .tabs {
15
- display: flex;
16
- left: 0;
17
- }
18
-
19
- .tab-button {
20
- display: inline-block;
21
- background-color: transparent;
22
- padding: 5px 10px;
23
- cursor: pointer;
24
- margin-bottom: -2px;
25
- border-top: 2px solid transparent;
26
- border-left: 2px solid transparent;
27
- border-right: 2px solid transparent;
28
- border-bottom: 0px;
29
- border-top-left-radius: 0.5rem;
30
- border-top-right-radius: 0.5rem;
31
- color: gray;
32
- }
33
-
34
- .tab-button.active {
35
- background-color: white;
36
- border-top: 2px solid #dee2e6;
37
- border-left: 2px solid #dee2e6;
38
- border-right: 2px solid #dee2e6;
39
- color: black;
40
- }
41
-
42
- /*content*/
43
-
44
- .content {
45
- border: gray;
46
- border-left-width: 2px;
47
- }
48
-
49
- .content-pane {
50
- display: none;
51
- padding: 20px;
52
- }
53
-
54
- .content-pane.active {
55
- display: flex;
56
- -ms-flex-wrap: wrap;
57
- flex-wrap: wrap;
58
- }
59
-
60
- *, :before, :after {
61
- box-sizing: border-box;
62
- border-width: 0;
63
- border-style: solid;
64
- border-color: #e5e7eb;
65
- }
66
-
67
-
68
- .flex {
69
- display: flex;
70
- }
71
-
72
- .border-transparent {
73
- border-color: transparent;
74
- }
75
-
76
- .border-b-2 {
77
- border-bottom: 2px solid #dee2e6;
78
- }
79
-
80
- .border-lr-2 {
81
- border-left: 2px solid #dee2e6;
82
- border-right: 2px solid #dee2e6;
83
- }
84
-
spaces/AvaterClasher/Food_Classifier_Refined_MONI/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Food Classifier Refined MONI
3
- emoji: 🐢
4
- colorFrom: gray
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.42.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Awesimo/jojogan/app.py DELETED
@@ -1,124 +0,0 @@
1
- import os
2
- from PIL import Image
3
- import torch
4
- import gradio as gr
5
- import torch
6
- torch.backends.cudnn.benchmark = True
7
- from torchvision import transforms, utils
8
- from util import *
9
- from PIL import Image
10
- import math
11
- import random
12
- import numpy as np
13
- from torch import nn, autograd, optim
14
- from torch.nn import functional as F
15
- from tqdm import tqdm
16
- import lpips
17
- from model import *
18
- from copy import deepcopy
19
- import imageio
20
-
21
- import os
22
- import sys
23
- import numpy as np
24
- from PIL import Image
25
- import torch
26
- import torchvision.transforms as transforms
27
- from argparse import Namespace
28
- from e4e.models.psp import pSp
29
- from util import *
30
- from huggingface_hub import hf_hub_download
31
-
32
- device= 'cpu'
33
- model_path_e = hf_hub_download(repo_id="akhaliq/JoJoGAN_e4e_ffhq_encode", filename="e4e_ffhq_encode.pt")
34
- ckpt = torch.load(model_path_e, map_location='cpu')
35
- opts = ckpt['opts']
36
- opts['checkpoint_path'] = model_path_e
37
- opts= Namespace(**opts)
38
- net = pSp(opts, device).eval().to(device)
39
-
40
- @ torch.no_grad()
41
- def projection(img, name, device='cuda'):
42
-
43
- transform = transforms.Compose(
44
- [
45
- transforms.Resize(256),
46
- transforms.CenterCrop(256),
47
- transforms.ToTensor(),
48
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
49
- ]
50
- )
51
- img = transform(img).unsqueeze(0).to(device)
52
- images, w_plus = net(img, randomize_noise=False, return_latents=True)
53
- result_file = {}
54
- result_file['latent'] = w_plus[0]
55
- torch.save(result_file, name)
56
- return w_plus[0]
57
-
58
- device = 'cpu'
59
-
60
- latent_dim = 512
61
-
62
- model_path_s = hf_hub_download(repo_id="akhaliq/jojogan-stylegan2-ffhq-config-f", filename="stylegan2-ffhq-config-f.pt")
63
- original_generator = Generator(1024, latent_dim, 8, 2).to(device)
64
- ckpt = torch.load(model_path_s, map_location=lambda storage, loc: storage)
65
- original_generator.load_state_dict(ckpt["g_ema"], strict=False)
66
- mean_latent = original_generator.mean_latent(10000)
67
-
68
-
69
- #MODELS
70
- generatorzombie = deepcopy(original_generator)
71
- generatorhulk = deepcopy(original_generator)
72
- generatorjojo = deepcopy(original_generator)
73
- generatorwalker = deepcopy(original_generator)
74
-
75
- transform = transforms.Compose(
76
- [
77
- transforms.Resize((1024, 1024)),
78
- transforms.ToTensor(),
79
- transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
80
- ]
81
- )
82
-
83
- #HULK
84
- modelhulk = hf_hub_download(repo_id="Awesimo/jojogan-hulk", filename="hulk.pt")
85
- ckpthulk = torch.load(modelhulk, map_location=lambda storage, loc: storage)
86
- generatorhulk.load_state_dict(ckpthulk["g"], strict=False)
87
-
88
- #ZOMBIE
89
- modelzombie = hf_hub_download(repo_id="Awesimo/jojogan-zombie", filename="zombie.pt")
90
- ckptzombie = torch.load(modelzombie, map_location=lambda storage, loc: storage)
91
- generatorzombie.load_state_dict(ckptzombie["g"], strict=False)
92
-
93
- #WHITE WALKER
94
- modelwalker = hf_hub_download(repo_id="Awesimo/jojogan-white-walker", filename="white_walker_v2.pt")
95
- ckptwalker = torch.load(modelwalker, map_location=lambda storage, loc: storage)
96
- generatorwalker.load_state_dict(ckptwalker["g"], strict=False)
97
-
98
-
99
- def inference(img, model):
100
- img.save('out.jpg')
101
- aligned_face = align_face('out.jpg')
102
-
103
- my_w = projection(aligned_face, "test.pt", device).unsqueeze(0)
104
- if model == 'Hulk':
105
- with torch.no_grad():
106
- my_sample = generatorhulk(my_w, input_is_latent=True)
107
- elif model == 'Zombie':
108
- with torch.no_grad():
109
- my_sample = generatorzombie(my_w, input_is_latent=True)
110
- elif model == 'White-Walker':
111
- with torch.no_grad():
112
- my_sample = generatorwalker(my_w, input_is_latent=True)
113
- else:
114
- with torch.no_grad():
115
- my_sample = generatorzombie(my_w, input_is_latent=True)
116
-
117
-
118
- npimage = my_sample[0].permute(1, 2, 0).detach().numpy()
119
- imageio.imwrite('filename.jpeg', npimage)
120
- return 'filename.jpeg'
121
-
122
- title = "JoJoGAN Test 🤖"
123
- examples=[['assets/samples/image01.jpg','Hulk'],['assets/samples/image02.jpg','Zombie'],['assets/samples/image03.jpg','White-Walker'],['assets/samples/image04.jpg','Hulk']]
124
- gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Dropdown(choices=['Hulk', 'Zombie', 'White-Walker'], type="value", default='Hulk', label="Model")], gr.outputs.Image(type="file"),title=title,allow_flagging=False,examples=examples,allow_screenshot=False).launch()
spaces/Awesimo/jojogan/e4e/editings/ganspace.py DELETED
@@ -1,22 +0,0 @@
1
- import torch
2
-
3
-
4
- def edit(latents, pca, edit_directions):
5
- edit_latents = []
6
- for latent in latents:
7
- for pca_idx, start, end, strength in edit_directions:
8
- delta = get_delta(pca, latent, pca_idx, strength)
9
- delta_padded = torch.zeros(latent.shape).to('cuda')
10
- delta_padded[start:end] += delta.repeat(end - start, 1)
11
- edit_latents.append(latent + delta_padded)
12
- return torch.stack(edit_latents)
13
-
14
-
15
- def get_delta(pca, latent, idx, strength):
16
- # pca: ganspace checkpoint. latent: (16, 512) w+
17
- w_centered = latent - pca['mean'].to('cuda')
18
- lat_comp = pca['comp'].to('cuda')
19
- lat_std = pca['std'].to('cuda')
20
- w_coord = torch.sum(w_centered[0].reshape(-1)*lat_comp[idx].reshape(-1)) / lat_std[idx]
21
- delta = (strength - w_coord)*lat_comp[idx]*lat_std[idx]
22
- return delta
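
A hedged usage sketch (Python) for the GANSpace editing helper above. The import path, the PCA checkpoint path, and the chosen component, layer range, and strength are illustrative values, not taken from this repository; in the e4e code the latents would come from the encoder as (18, 512) w+ codes.

import torch
from e4e.editings import ganspace   # assumed import path for the module above

# GANSpace PCA checkpoint: a dict with 'mean', 'comp' and 'std' tensors.
ganspace_pca = torch.load('editings/ganspace_pca/ffhq_pca.pt')  # illustrative path

# (pca_idx, start_layer, end_layer, strength): push one component on layers 7..9.
directions = [(54, 7, 10, 20.0)]

latents = torch.randn(1, 18, 512, device='cuda')      # stand-in for e4e w+ codes
edited = ganspace.edit(latents, ganspace_pca, directions)  # shape (1, 18, 512)
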
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/visualizer.py DELETED
@@ -1,1267 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import colorsys
3
- import logging
4
- import math
5
- import numpy as np
6
- from enum import Enum, unique
7
- import cv2
8
- import matplotlib as mpl
9
- import matplotlib.colors as mplc
10
- import matplotlib.figure as mplfigure
11
- import pycocotools.mask as mask_util
12
- import torch
13
- from matplotlib.backends.backend_agg import FigureCanvasAgg
14
- from PIL import Image
15
-
16
- from detectron2.data import MetadataCatalog
17
- from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
18
- from detectron2.utils.file_io import PathManager
19
-
20
- from .colormap import random_color
21
-
22
- logger = logging.getLogger(__name__)
23
-
24
- __all__ = ["ColorMode", "VisImage", "Visualizer"]
25
-
26
-
27
- _SMALL_OBJECT_AREA_THRESH = 1000
28
- _LARGE_MASK_AREA_THRESH = 120000
29
- _OFF_WHITE = (1.0, 1.0, 240.0 / 255)
30
- _BLACK = (0, 0, 0)
31
- _RED = (1.0, 0, 0)
32
-
33
- _KEYPOINT_THRESHOLD = 0.05
34
-
35
-
36
- @unique
37
- class ColorMode(Enum):
38
- """
39
- Enum of different color modes to use for instance visualizations.
40
- """
41
-
42
- IMAGE = 0
43
- """
44
- Picks a random color for every instance and overlay segmentations with low opacity.
45
- """
46
- SEGMENTATION = 1
47
- """
48
- Let instances of the same category have similar colors
49
- (from metadata.thing_colors), and overlay them with
50
- high opacity. This provides more attention on the quality of segmentation.
51
- """
52
- IMAGE_BW = 2
53
- """
54
- Same as IMAGE, but convert all areas without masks to gray-scale.
55
- Only available for drawing per-instance mask predictions.
56
- """
57
-
58
-
59
- class GenericMask:
60
- """
61
- Attribute:
62
- polygons (list[ndarray]): list[ndarray]: polygons for this mask.
63
- Each ndarray has format [x, y, x, y, ...]
64
- mask (ndarray): a binary mask
65
- """
66
-
67
- def __init__(self, mask_or_polygons, height, width):
68
- self._mask = self._polygons = self._has_holes = None
69
- self.height = height
70
- self.width = width
71
-
72
- m = mask_or_polygons
73
- if isinstance(m, dict):
74
- # RLEs
75
- assert "counts" in m and "size" in m
76
- if isinstance(m["counts"], list): # uncompressed RLEs
77
- h, w = m["size"]
78
- assert h == height and w == width
79
- m = mask_util.frPyObjects(m, h, w)
80
- self._mask = mask_util.decode(m)[:, :]
81
- return
82
-
83
- if isinstance(m, list): # list[ndarray]
84
- self._polygons = [np.asarray(x).reshape(-1) for x in m]
85
- return
86
-
87
- if isinstance(m, np.ndarray): # assumed to be a binary mask
88
- assert m.shape[1] != 2, m.shape
89
- assert m.shape == (
90
- height,
91
- width,
92
- ), f"mask shape: {m.shape}, target dims: {height}, {width}"
93
- self._mask = m.astype("uint8")
94
- return
95
-
96
- raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
97
-
98
- @property
99
- def mask(self):
100
- if self._mask is None:
101
- self._mask = self.polygons_to_mask(self._polygons)
102
- return self._mask
103
-
104
- @property
105
- def polygons(self):
106
- if self._polygons is None:
107
- self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
108
- return self._polygons
109
-
110
- @property
111
- def has_holes(self):
112
- if self._has_holes is None:
113
- if self._mask is not None:
114
- self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
115
- else:
116
- self._has_holes = False # if original format is polygon, does not have holes
117
- return self._has_holes
118
-
119
- def mask_to_polygons(self, mask):
120
- # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
121
- # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
122
- # Internal contours (holes) are placed in hierarchy-2.
123
- # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
124
- mask = np.ascontiguousarray(mask) # some versions of cv2 do not support non-contiguous arrays
125
- res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
126
- hierarchy = res[-1]
127
- if hierarchy is None: # empty mask
128
- return [], False
129
- has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
130
- res = res[-2]
131
- res = [x.flatten() for x in res]
132
- # These coordinates from OpenCV are integers in range [0, W-1 or H-1].
133
- # We add 0.5 to turn them into real-value coordinate space. A better solution
134
- # would be to first +0.5 and then dilate the returned polygon by 0.5.
135
- res = [x + 0.5 for x in res if len(x) >= 6]
136
- return res, has_holes
137
-
138
- def polygons_to_mask(self, polygons):
139
- rle = mask_util.frPyObjects(polygons, self.height, self.width)
140
- rle = mask_util.merge(rle)
141
- return mask_util.decode(rle)[:, :]
142
-
143
- def area(self):
144
- return self.mask.sum()
145
-
146
- def bbox(self):
147
- p = mask_util.frPyObjects(self.polygons, self.height, self.width)
148
- p = mask_util.merge(p)
149
- bbox = mask_util.toBbox(p)
150
- bbox[2] += bbox[0]
151
- bbox[3] += bbox[1]
152
- return bbox
153
-
154
-
155
- class _PanopticPrediction:
156
- """
157
- Unify different panoptic annotation/prediction formats
158
- """
159
-
160
- def __init__(self, panoptic_seg, segments_info, metadata=None):
161
- if segments_info is None:
162
- assert metadata is not None
163
- # If "segments_info" is None, we assume "panoptic_img" is a
164
- # H*W int32 image storing the panoptic_id in the format of
165
- # category_id * label_divisor + instance_id. We reserve -1 for
166
- # VOID label.
167
- label_divisor = metadata.label_divisor
168
- segments_info = []
169
- for panoptic_label in np.unique(panoptic_seg.numpy()):
170
- if panoptic_label == -1:
171
- # VOID region.
172
- continue
173
- pred_class = panoptic_label // label_divisor
174
- isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
175
- segments_info.append(
176
- {
177
- "id": int(panoptic_label),
178
- "category_id": int(pred_class),
179
- "isthing": bool(isthing),
180
- }
181
- )
182
- del metadata
183
-
184
- self._seg = panoptic_seg
185
-
186
- self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
187
- segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
188
- areas = areas.numpy()
189
- sorted_idxs = np.argsort(-areas)
190
- self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
191
- self._seg_ids = self._seg_ids.tolist()
192
- for sid, area in zip(self._seg_ids, self._seg_areas):
193
- if sid in self._sinfo:
194
- self._sinfo[sid]["area"] = float(area)
195
-
196
- def non_empty_mask(self):
197
- """
198
- Returns:
199
- (H, W) array, a mask for all pixels that have a prediction
200
- """
201
- empty_ids = []
202
- for id in self._seg_ids:
203
- if id not in self._sinfo:
204
- empty_ids.append(id)
205
- if len(empty_ids) == 0:
206
- return np.zeros(self._seg.shape, dtype=np.uint8)
207
- assert (
208
- len(empty_ids) == 1
209
- ), ">1 ids corresponds to no labels. This is currently not supported"
210
- return (self._seg != empty_ids[0]).numpy().astype(np.bool)
211
-
212
- def semantic_masks(self):
213
- for sid in self._seg_ids:
214
- sinfo = self._sinfo.get(sid)
215
- if sinfo is None or sinfo["isthing"]:
216
- # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
217
- continue
218
- yield (self._seg == sid).numpy().astype(np.bool), sinfo
219
-
220
- def instance_masks(self):
221
- for sid in self._seg_ids:
222
- sinfo = self._sinfo.get(sid)
223
- if sinfo is None or not sinfo["isthing"]:
224
- continue
225
- mask = (self._seg == sid).numpy().astype(np.bool)
226
- if mask.sum() > 0:
227
- yield mask, sinfo
228
-
229
-
230
- def _create_text_labels(classes, scores, class_names, is_crowd=None):
231
- """
232
- Args:
233
- classes (list[int] or None):
234
- scores (list[float] or None):
235
- class_names (list[str] or None):
236
- is_crowd (list[bool] or None):
237
-
238
- Returns:
239
- list[str] or None
240
- """
241
- labels = None
242
- if classes is not None:
243
- if class_names is not None and len(class_names) > 0:
244
- labels = [class_names[i] for i in classes]
245
- else:
246
- labels = [str(i) for i in classes]
247
- if scores is not None:
248
- if labels is None:
249
- labels = ["{:.0f}%".format(s * 100) for s in scores]
250
- else:
251
- labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
252
- if labels is not None and is_crowd is not None:
253
- labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)]
254
- return labels
255
-
256
-
257
- class VisImage:
258
- def __init__(self, img, scale=1.0):
259
- """
260
- Args:
261
- img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255].
262
- scale (float): scale the input image
263
- """
264
- self.img = img
265
- self.scale = scale
266
- self.width, self.height = img.shape[1], img.shape[0]
267
- self._setup_figure(img)
268
-
269
- def _setup_figure(self, img):
270
- """
271
- Args:
272
- Same as in :meth:`__init__()`.
273
-
274
- Returns:
275
- fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
276
- ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
277
- """
278
- fig = mplfigure.Figure(frameon=False)
279
- self.dpi = fig.get_dpi()
280
- # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
281
- # (https://github.com/matplotlib/matplotlib/issues/15363)
282
- fig.set_size_inches(
283
- (self.width * self.scale + 1e-2) / self.dpi,
284
- (self.height * self.scale + 1e-2) / self.dpi,
285
- )
286
- self.canvas = FigureCanvasAgg(fig)
287
- # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
288
- ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
289
- ax.axis("off")
290
- self.fig = fig
291
- self.ax = ax
292
- self.reset_image(img)
293
-
294
- def reset_image(self, img):
295
- """
296
- Args:
297
- img: same as in __init__
298
- """
299
- img = img.astype("uint8")
300
- self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
301
-
302
- def save(self, filepath):
303
- """
304
- Args:
305
- filepath (str): a string that contains the absolute path, including the file name, where
306
- the visualized image will be saved.
307
- """
308
- self.fig.savefig(filepath)
309
-
310
- def get_image(self):
311
- """
312
- Returns:
313
- ndarray:
314
- the visualized image of shape (H, W, 3) (RGB) in uint8 type.
315
- The shape is scaled w.r.t the input image using the given `scale` argument.
316
- """
317
- canvas = self.canvas
318
- s, (width, height) = canvas.print_to_buffer()
319
- # buf = io.BytesIO() # works for cairo backend
320
- # canvas.print_rgba(buf)
321
- # width, height = self.width, self.height
322
- # s = buf.getvalue()
323
-
324
- buffer = np.frombuffer(s, dtype="uint8")
325
-
326
- img_rgba = buffer.reshape(height, width, 4)
327
- rgb, alpha = np.split(img_rgba, [3], axis=2)
328
- return rgb.astype("uint8")
329
-
330
-
331
- class Visualizer:
332
- """
333
- Visualizer that draws data about detection/segmentation on images.
334
-
335
- It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
336
- that draw primitive objects to images, as well as high-level wrappers like
337
- `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
338
- that draw composite data in some pre-defined style.
339
-
340
- Note that the exact visualization style for the high-level wrappers are subject to change.
341
- Style such as color, opacity, label contents, visibility of labels, or even the visibility
342
- of objects themselves (e.g. when the object is too small) may change according
343
- to different heuristics, as long as the results still look visually reasonable.
344
-
345
- To obtain a consistent style, you can implement custom drawing functions with the
346
- abovementioned primitive methods instead. If you need more customized visualization
347
- styles, you can process the data yourself following their format documented in
348
- tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
349
- intend to satisfy everyone's preference on drawing styles.
350
-
351
- This visualizer focuses on high rendering quality rather than performance. It is not
352
- designed to be used for real-time applications.
353
- """
354
-
355
- # TODO implement a fast, rasterized version using OpenCV
356
-
357
- def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
358
- """
359
- Args:
360
- img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
361
- the height and width of the image respectively. C is the number of
362
- color channels. The image is required to be in RGB format since that
363
- is a requirement of the Matplotlib library. The image is also expected
364
- to be in the range [0, 255].
365
- metadata (Metadata): dataset metadata (e.g. class names and colors)
366
- instance_mode (ColorMode): defines one of the pre-defined style for drawing
367
- instances on an image.
368
- """
369
- self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
370
- if metadata is None:
371
- metadata = MetadataCatalog.get("__nonexist__")
372
- self.metadata = metadata
373
- self.output = VisImage(self.img, scale=scale)
374
- self.cpu_device = torch.device("cpu")
375
-
376
- # too small texts are useless, therefore clamp to 9
377
- self._default_font_size = max(
378
- np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
379
- )
380
- self._instance_mode = instance_mode
381
- self.keypoint_threshold = _KEYPOINT_THRESHOLD
382
-
383
- def draw_instance_predictions(self, predictions):
384
- """
385
- Draw instance-level prediction results on an image.
386
-
387
- Args:
388
- predictions (Instances): the output of an instance detection/segmentation
389
- model. Following fields will be used to draw:
390
- "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
391
-
392
- Returns:
393
- output (VisImage): image object with visualizations.
394
- """
395
- boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
396
- scores = predictions.scores if predictions.has("scores") else None
397
- classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
398
- labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
399
- keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
400
-
401
- if predictions.has("pred_masks"):
402
- masks = np.asarray(predictions.pred_masks)
403
- masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
404
- else:
405
- masks = None
406
-
407
- if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
408
- colors = [
409
- self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
410
- ]
411
- alpha = 0.8
412
- else:
413
- colors = None
414
- alpha = 0.5
415
-
416
- if self._instance_mode == ColorMode.IMAGE_BW:
417
- self.output.reset_image(
418
- self._create_grayscale_image(
419
- (predictions.pred_masks.any(dim=0) > 0).numpy()
420
- if predictions.has("pred_masks")
421
- else None
422
- )
423
- )
424
- alpha = 0.3
425
-
426
- self.overlay_instances(
427
- masks=masks,
428
- boxes=boxes,
429
- labels=labels,
430
- keypoints=keypoints,
431
- assigned_colors=colors,
432
- alpha=alpha,
433
- )
434
- return self.output
435
-
436
- def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
437
- """
438
- Draw semantic segmentation predictions/labels.
439
-
440
- Args:
441
- sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
442
- Each value is the integer label of the pixel.
443
- area_threshold (int): segments with less than `area_threshold` are not drawn.
444
- alpha (float): the larger it is, the more opaque the segmentations are.
445
-
446
- Returns:
447
- output (VisImage): image object with visualizations.
448
- """
449
- if isinstance(sem_seg, torch.Tensor):
450
- sem_seg = sem_seg.numpy()
451
- labels, areas = np.unique(sem_seg, return_counts=True)
452
- sorted_idxs = np.argsort(-areas).tolist()
453
- labels = labels[sorted_idxs]
454
- for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
455
- try:
456
- mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
457
- except (AttributeError, IndexError):
458
- mask_color = None
459
-
460
- binary_mask = (sem_seg == label).astype(np.uint8)
461
- text = self.metadata.stuff_classes[label]
462
- self.draw_binary_mask(
463
- binary_mask,
464
- color=mask_color,
465
- edge_color=_OFF_WHITE,
466
- text=text,
467
- alpha=alpha,
468
- area_threshold=area_threshold,
469
- )
470
- return self.output
471
-
472
- def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
473
- """
474
- Draw panoptic prediction annotations or results.
475
-
476
- Args:
477
- panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
478
- segment.
479
- segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
480
- If it is a ``list[dict]``, each dict contains keys "id", "category_id".
481
- If None, category id of each pixel is computed by
482
- ``pixel // metadata.label_divisor``.
483
- area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
484
-
485
- Returns:
486
- output (VisImage): image object with visualizations.
487
- """
488
- pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
489
-
490
- if self._instance_mode == ColorMode.IMAGE_BW:
491
- self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))
492
-
493
- # draw mask for all semantic segments first i.e. "stuff"
494
- for mask, sinfo in pred.semantic_masks():
495
- category_idx = sinfo["category_id"]
496
- try:
497
- mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
498
- except AttributeError:
499
- mask_color = None
500
-
501
- text = self.metadata.stuff_classes[category_idx]
502
- self.draw_binary_mask(
503
- mask,
504
- color=mask_color,
505
- edge_color=_OFF_WHITE,
506
- text=text,
507
- alpha=alpha,
508
- area_threshold=area_threshold,
509
- )
510
-
511
- # draw mask for all instances second
512
- all_instances = list(pred.instance_masks())
513
- if len(all_instances) == 0:
514
- return self.output
515
- masks, sinfo = list(zip(*all_instances))
516
- category_ids = [x["category_id"] for x in sinfo]
517
-
518
- try:
519
- scores = [x["score"] for x in sinfo]
520
- except KeyError:
521
- scores = None
522
- labels = _create_text_labels(
523
- category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo]
524
- )
525
-
526
- try:
527
- colors = [
528
- self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
529
- ]
530
- except AttributeError:
531
- colors = None
532
- self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
533
-
534
- return self.output
535
-
536
- draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility
537
-
538
- def draw_dataset_dict(self, dic):
539
- """
540
- Draw annotations/segmentations in Detectron2 Dataset format.
541
-
542
- Args:
543
- dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.
544
-
545
- Returns:
546
- output (VisImage): image object with visualizations.
547
- """
548
- annos = dic.get("annotations", None)
549
- if annos:
550
- if "segmentation" in annos[0]:
551
- masks = [x["segmentation"] for x in annos]
552
- else:
553
- masks = None
554
- if "keypoints" in annos[0]:
555
- keypts = [x["keypoints"] for x in annos]
556
- keypts = np.array(keypts).reshape(len(annos), -1, 3)
557
- else:
558
- keypts = None
559
-
560
- boxes = [
561
- BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS)
562
- if len(x["bbox"]) == 4
563
- else x["bbox"]
564
- for x in annos
565
- ]
566
-
567
- colors = None
568
- category_ids = [x["category_id"] for x in annos]
569
- if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
570
- colors = [
571
- self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
572
- for c in category_ids
573
- ]
574
- names = self.metadata.get("thing_classes", None)
575
- labels = _create_text_labels(
576
- category_ids,
577
- scores=None,
578
- class_names=names,
579
- is_crowd=[x.get("iscrowd", 0) for x in annos],
580
- )
581
- self.overlay_instances(
582
- labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
583
- )
584
-
585
- sem_seg = dic.get("sem_seg", None)
586
- if sem_seg is None and "sem_seg_file_name" in dic:
587
- with PathManager.open(dic["sem_seg_file_name"], "rb") as f:
588
- sem_seg = Image.open(f)
589
- sem_seg = np.asarray(sem_seg, dtype="uint8")
590
- if sem_seg is not None:
591
- self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
592
-
593
- pan_seg = dic.get("pan_seg", None)
594
- if pan_seg is None and "pan_seg_file_name" in dic:
595
- with PathManager.open(dic["pan_seg_file_name"], "rb") as f:
596
- pan_seg = Image.open(f)
597
- pan_seg = np.asarray(pan_seg)
598
- from panopticapi.utils import rgb2id
599
-
600
- pan_seg = rgb2id(pan_seg)
601
- if pan_seg is not None:
602
- segments_info = dic["segments_info"]
603
- pan_seg = torch.tensor(pan_seg)
604
- self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)
605
- return self.output
606
-
607
- def overlay_instances(
608
- self,
609
- *,
610
- boxes=None,
611
- labels=None,
612
- masks=None,
613
- keypoints=None,
614
- assigned_colors=None,
615
- alpha=0.5,
616
- ):
617
- """
618
- Args:
619
- boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
620
- or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
621
- or a :class:`RotatedBoxes`,
622
- or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
623
- for the N objects in a single image,
624
- labels (list[str]): the text to be displayed for each instance.
625
- masks (masks-like object): Supported types are:
626
-
627
- * :class:`detectron2.structures.PolygonMasks`,
628
- :class:`detectron2.structures.BitMasks`.
629
- * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
630
- The first level of the list corresponds to individual instances. The second
631
- level to all the polygon that compose the instance, and the third level
632
- to the polygon coordinates. The third level should have the format of
633
- [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
634
- * list[ndarray]: each ndarray is a binary mask of shape (H, W).
635
- * list[dict]: each dict is a COCO-style RLE.
636
- keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
637
- where the N is the number of instances and K is the number of keypoints.
638
- The last dimension corresponds to (x, y, visibility or score).
639
- assigned_colors (list[matplotlib.colors]): a list of colors, where each color
640
- corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
641
- for full list of formats that the colors are accepted in.
642
- Returns:
643
- output (VisImage): image object with visualizations.
644
- """
645
- num_instances = 0
646
- if boxes is not None:
647
- boxes = self._convert_boxes(boxes)
648
- num_instances = len(boxes)
649
- if masks is not None:
650
- masks = self._convert_masks(masks)
651
- if num_instances:
652
- assert len(masks) == num_instances
653
- else:
654
- num_instances = len(masks)
655
- if keypoints is not None:
656
- if num_instances:
657
- assert len(keypoints) == num_instances
658
- else:
659
- num_instances = len(keypoints)
660
- keypoints = self._convert_keypoints(keypoints)
661
- if labels is not None:
662
- assert len(labels) == num_instances
663
- if assigned_colors is None:
664
- assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
665
- if num_instances == 0:
666
- return self.output
667
- if boxes is not None and boxes.shape[1] == 5:
668
- return self.overlay_rotated_instances(
669
- boxes=boxes, labels=labels, assigned_colors=assigned_colors
670
- )
671
-
672
- # Display in largest to smallest order to reduce occlusion.
673
- areas = None
674
- if boxes is not None:
675
- areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
676
- elif masks is not None:
677
- areas = np.asarray([x.area() for x in masks])
678
-
679
- if areas is not None:
680
- sorted_idxs = np.argsort(-areas).tolist()
681
- # Re-order overlapped instances in descending order.
682
- boxes = boxes[sorted_idxs] if boxes is not None else None
683
- labels = [labels[k] for k in sorted_idxs] if labels is not None else None
684
- masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
685
- assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
686
- keypoints = keypoints[sorted_idxs] if keypoints is not None else None
687
-
688
- for i in range(num_instances):
689
- color = assigned_colors[i]
690
- if boxes is not None:
691
- self.draw_box(boxes[i], edge_color=color)
692
-
693
- if masks is not None:
694
- for segment in masks[i].polygons:
695
- self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
696
-
697
- if labels is not None:
698
- # first get a box
699
- if boxes is not None:
700
- x0, y0, x1, y1 = boxes[i]
701
- text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
702
- horiz_align = "left"
703
- elif masks is not None:
704
- # skip small mask without polygon
705
- if len(masks[i].polygons) == 0:
706
- continue
707
-
708
- x0, y0, x1, y1 = masks[i].bbox()
709
-
710
- # draw text in the center (defined by median) when box is not drawn
711
- # median is less sensitive to outliers.
712
- text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
713
- horiz_align = "center"
714
- else:
715
- continue # drawing the box confidence for keypoints isn't very useful.
716
- # for small objects, draw text at the side to avoid occlusion
717
- instance_area = (y1 - y0) * (x1 - x0)
718
- if (
719
- instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
720
- or y1 - y0 < 40 * self.output.scale
721
- ):
722
- if y1 >= self.output.height - 5:
723
- text_pos = (x1, y0)
724
- else:
725
- text_pos = (x0, y1)
726
-
727
- height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
728
- lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
729
- font_size = (
730
- np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
731
- * 0.5
732
- * self._default_font_size
733
- )
734
- self.draw_text(
735
- labels[i],
736
- text_pos,
737
- color=lighter_color,
738
- horizontal_alignment=horiz_align,
739
- font_size=font_size,
740
- )
741
-
742
- # draw keypoints
743
- if keypoints is not None:
744
- for keypoints_per_instance in keypoints:
745
- self.draw_and_connect_keypoints(keypoints_per_instance)
746
-
747
- return self.output
748
-
749
- def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
750
- """
751
- Args:
752
- boxes (ndarray): an Nx5 numpy array of
753
- (x_center, y_center, width, height, angle_degrees) format
754
- for the N objects in a single image.
755
- labels (list[str]): the text to be displayed for each instance.
756
- assigned_colors (list[matplotlib.colors]): a list of colors, where each color
757
- corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
758
- for full list of formats that the colors are accepted in.
759
-
760
- Returns:
761
- output (VisImage): image object with visualizations.
762
- """
763
- num_instances = len(boxes)
764
-
765
- if assigned_colors is None:
766
- assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
767
- if num_instances == 0:
768
- return self.output
769
-
770
- # Display in largest to smallest order to reduce occlusion.
771
- if boxes is not None:
772
- areas = boxes[:, 2] * boxes[:, 3]
773
-
774
- sorted_idxs = np.argsort(-areas).tolist()
775
- # Re-order overlapped instances in descending order.
776
- boxes = boxes[sorted_idxs]
777
- labels = [labels[k] for k in sorted_idxs] if labels is not None else None
778
- colors = [assigned_colors[idx] for idx in sorted_idxs]
779
-
780
- for i in range(num_instances):
781
- self.draw_rotated_box_with_label(
782
- boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
783
- )
784
-
785
- return self.output
786
-
787
- def draw_and_connect_keypoints(self, keypoints):
788
- """
789
- Draws keypoints of an instance and follows the rules for keypoint connections
790
- to draw lines between appropriate keypoints. This follows color heuristics for
791
- line color.
792
-
793
- Args:
794
- keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
795
- and the last dimension corresponds to (x, y, probability).
796
-
797
- Returns:
798
- output (VisImage): image object with visualizations.
799
- """
800
- visible = {}
801
- keypoint_names = self.metadata.get("keypoint_names")
802
- for idx, keypoint in enumerate(keypoints):
803
-
804
- # draw keypoint
805
- x, y, prob = keypoint
806
- if prob > self.keypoint_threshold:
807
- self.draw_circle((x, y), color=_RED)
808
- if keypoint_names:
809
- keypoint_name = keypoint_names[idx]
810
- visible[keypoint_name] = (x, y)
811
-
812
- if self.metadata.get("keypoint_connection_rules"):
813
- for kp0, kp1, color in self.metadata.keypoint_connection_rules:
814
- if kp0 in visible and kp1 in visible:
815
- x0, y0 = visible[kp0]
816
- x1, y1 = visible[kp1]
817
- color = tuple(x / 255.0 for x in color)
818
- self.draw_line([x0, x1], [y0, y1], color=color)
819
-
820
- # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
821
- # Note that this strategy is specific to person keypoints.
822
- # For other keypoints, it should just do nothing
823
- try:
824
- ls_x, ls_y = visible["left_shoulder"]
825
- rs_x, rs_y = visible["right_shoulder"]
826
- mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
827
- except KeyError:
828
- pass
829
- else:
830
- # draw line from nose to mid-shoulder
831
- nose_x, nose_y = visible.get("nose", (None, None))
832
- if nose_x is not None:
833
- self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
834
-
835
- try:
836
- # draw line from mid-shoulder to mid-hip
837
- lh_x, lh_y = visible["left_hip"]
838
- rh_x, rh_y = visible["right_hip"]
839
- except KeyError:
840
- pass
841
- else:
842
- mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
843
- self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
844
- return self.output
845
-
846
- """
847
- Primitive drawing functions:
848
- """
849
-
850
- def draw_text(
851
- self,
852
- text,
853
- position,
854
- *,
855
- font_size=None,
856
- color="g",
857
- horizontal_alignment="center",
858
- rotation=0,
859
- ):
860
- """
861
- Args:
862
- text (str): class label
863
- position (tuple): a tuple of the x and y coordinates to place text on image.
864
- font_size (int, optional): font of the text. If not provided, a font size
865
- proportional to the image width is calculated and used.
866
- color: color of the text. Refer to `matplotlib.colors` for full list
867
- of formats that are accepted.
868
- horizontal_alignment (str): see `matplotlib.text.Text`
869
- rotation: rotation angle in degrees CCW
870
-
871
- Returns:
872
- output (VisImage): image object with text drawn.
873
- """
874
- if not font_size:
875
- font_size = self._default_font_size
876
-
877
- # since the text background is dark, we don't want the text to be dark
878
- color = np.maximum(list(mplc.to_rgb(color)), 0.2)
879
- color[np.argmax(color)] = max(0.8, np.max(color))
880
-
881
- x, y = position
882
- self.output.ax.text(
883
- x,
884
- y,
885
- text,
886
- size=font_size * self.output.scale,
887
- family="sans-serif",
888
- bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
889
- verticalalignment="top",
890
- horizontalalignment=horizontal_alignment,
891
- color=color,
892
- zorder=10,
893
- rotation=rotation,
894
- )
895
- return self.output
896
-
897
- def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
898
- """
899
- Args:
900
- box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
901
- are the coordinates of the image's top left corner. x1 and y1 are the
902
- coordinates of the image's bottom right corner.
903
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
904
- edge_color: color of the outline of the box. Refer to `matplotlib.colors`
905
- for full list of formats that are accepted.
906
- line_style (string): the string to use to create the outline of the boxes.
907
-
908
- Returns:
909
- output (VisImage): image object with box drawn.
910
- """
911
- x0, y0, x1, y1 = box_coord
912
- width = x1 - x0
913
- height = y1 - y0
914
-
915
- linewidth = max(self._default_font_size / 4, 1)
916
-
917
- self.output.ax.add_patch(
918
- mpl.patches.Rectangle(
919
- (x0, y0),
920
- width,
921
- height,
922
- fill=False,
923
- edgecolor=edge_color,
924
- linewidth=linewidth * self.output.scale,
925
- alpha=alpha,
926
- linestyle=line_style,
927
- )
928
- )
929
- return self.output
930
-
931
- def draw_rotated_box_with_label(
932
- self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
933
- ):
934
- """
935
- Draw a rotated box with label on its top-left corner.
936
-
937
- Args:
938
- rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
939
- where cnt_x and cnt_y are the center coordinates of the box.
940
- w and h are the width and height of the box. angle represents how
941
- many degrees the box is rotated CCW with regard to the 0-degree box.
942
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
943
- edge_color: color of the outline of the box. Refer to `matplotlib.colors`
944
- for full list of formats that are accepted.
945
- line_style (string): the string to use to create the outline of the boxes.
946
- label (string): label for rotated box. It will not be rendered when set to None.
947
-
948
- Returns:
949
- output (VisImage): image object with box drawn.
950
- """
951
- cnt_x, cnt_y, w, h, angle = rotated_box
952
- area = w * h
953
- # use thinner lines when the box is small
954
- linewidth = self._default_font_size / (
955
- 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
956
- )
957
-
958
- theta = angle * math.pi / 180.0
959
- c = math.cos(theta)
960
- s = math.sin(theta)
961
- rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
962
- # x: left->right ; y: top->down
963
- rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
964
- for k in range(4):
965
- j = (k + 1) % 4
966
- self.draw_line(
967
- [rotated_rect[k][0], rotated_rect[j][0]],
968
- [rotated_rect[k][1], rotated_rect[j][1]],
969
- color=edge_color,
970
- linestyle="--" if k == 1 else line_style,
971
- linewidth=linewidth,
972
- )
973
-
974
- if label is not None:
975
- text_pos = rotated_rect[1] # topleft corner
976
-
977
- height_ratio = h / np.sqrt(self.output.height * self.output.width)
978
- label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
979
- font_size = (
980
- np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
981
- )
982
- self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
983
-
984
- return self.output
985
-
986
- def draw_circle(self, circle_coord, color, radius=3):
987
- """
988
- Args:
989
- circle_coord (list(int) or tuple(int)): contains the x and y coordinates
990
- of the center of the circle.
991
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
992
- formats that are accepted.
993
- radius (int): radius of the circle.
994
-
995
- Returns:
996
- output (VisImage): image object with box drawn.
997
- """
998
- x, y = circle_coord
999
- self.output.ax.add_patch(
1000
- mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
1001
- )
1002
- return self.output
1003
-
1004
- def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
1005
- """
1006
- Args:
1007
- x_data (list[int]): a list containing x values of all the points being drawn.
1008
- Length of list should match the length of y_data.
1009
- y_data (list[int]): a list containing y values of all the points being drawn.
1010
- Length of list should match the length of x_data.
1011
- color: color of the line. Refer to `matplotlib.colors` for a full list of
1012
- formats that are accepted.
1013
- linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
1014
- for a full list of formats that are accepted.
1015
- linewidth (float or None): width of the line. When it's None,
1016
- a default value will be computed and used.
1017
-
1018
- Returns:
1019
- output (VisImage): image object with line drawn.
1020
- """
1021
- if linewidth is None:
1022
- linewidth = self._default_font_size / 3
1023
- linewidth = max(linewidth, 1)
1024
- self.output.ax.add_line(
1025
- mpl.lines.Line2D(
1026
- x_data,
1027
- y_data,
1028
- linewidth=linewidth * self.output.scale,
1029
- color=color,
1030
- linestyle=linestyle,
1031
- )
1032
- )
1033
- return self.output
1034
-
1035
- def draw_binary_mask(
1036
- self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10
1037
- ):
1038
- """
1039
- Args:
1040
- binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
1041
- W is the image width. Each value in the array is either a 0 or 1 value of uint8
1042
- type.
1043
- color: color of the mask. Refer to `matplotlib.colors` for a full list of
1044
- formats that are accepted. If None, will pick a random color.
1045
- edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
1046
- full list of formats that are accepted.
1047
- text (str): if None, will be drawn on the object
1048
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
1049
- area_threshold (float): a connected component smaller than this area will not be shown.
1050
-
1051
- Returns:
1052
- output (VisImage): image object with mask drawn.
1053
- """
1054
- if color is None:
1055
- color = random_color(rgb=True, maximum=1)
1056
- color = mplc.to_rgb(color)
1057
-
1058
- has_valid_segment = False
1059
- binary_mask = binary_mask.astype("uint8") # opencv needs uint8
1060
- mask = GenericMask(binary_mask, self.output.height, self.output.width)
1061
- shape2d = (binary_mask.shape[0], binary_mask.shape[1])
1062
-
1063
- if not mask.has_holes:
1064
- # draw polygons for regular masks
1065
- for segment in mask.polygons:
1066
- area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
1067
- if area < (area_threshold or 0):
1068
- continue
1069
- has_valid_segment = True
1070
- segment = segment.reshape(-1, 2)
1071
- self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
1072
- else:
1073
- # TODO: Use Path/PathPatch to draw vector graphics:
1074
- # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
1075
- rgba = np.zeros(shape2d + (4,), dtype="float32")
1076
- rgba[:, :, :3] = color
1077
- rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
1078
- has_valid_segment = True
1079
- self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
1080
-
1081
- if text is not None and has_valid_segment:
1082
- lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
1083
- self._draw_text_in_mask(binary_mask, text, lighter_color)
1084
- return self.output
1085
-
1086
- def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):
1087
- """
1088
- Args:
1089
- soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].
1090
- color: color of the mask. Refer to `matplotlib.colors` for a full list of
1091
- formats that are accepted. If None, will pick a random color.
1092
- text (str): if None, will be drawn on the object
1093
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
1094
-
1095
- Returns:
1096
- output (VisImage): image object with mask drawn.
1097
- """
1098
- if color is None:
1099
- color = random_color(rgb=True, maximum=1)
1100
- color = mplc.to_rgb(color)
1101
-
1102
- shape2d = (soft_mask.shape[0], soft_mask.shape[1])
1103
- rgba = np.zeros(shape2d + (4,), dtype="float32")
1104
- rgba[:, :, :3] = color
1105
- rgba[:, :, 3] = soft_mask * alpha
1106
- self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
1107
-
1108
- if text is not None:
1109
- lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
1110
- binary_mask = (soft_mask > 0.5).astype("uint8")
1111
- self._draw_text_in_mask(binary_mask, text, lighter_color)
1112
- return self.output
1113
-
1114
- def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
1115
- """
1116
- Args:
1117
- segment: numpy array of shape Nx2, containing all the points in the polygon.
1118
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
1119
- formats that are accepted.
1120
- edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
1121
- full list of formats that are accepted. If not provided, a darker shade
1122
- of the polygon color will be used instead.
1123
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
1124
-
1125
- Returns:
1126
- output (VisImage): image object with polygon drawn.
1127
- """
1128
- if edge_color is None:
1129
- # make edge color darker than the polygon color
1130
- if alpha > 0.8:
1131
- edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
1132
- else:
1133
- edge_color = color
1134
- edge_color = mplc.to_rgb(edge_color) + (1,)
1135
-
1136
- polygon = mpl.patches.Polygon(
1137
- segment,
1138
- fill=True,
1139
- facecolor=mplc.to_rgb(color) + (alpha,),
1140
- edgecolor=edge_color,
1141
- linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
1142
- )
1143
- self.output.ax.add_patch(polygon)
1144
- return self.output
1145
-
1146
- """
1147
- Internal methods:
1148
- """
1149
-
1150
- def _jitter(self, color):
1151
- """
1152
- Randomly modifies given color to produce a slightly different color than the color given.
1153
-
1154
- Args:
1155
- color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
1156
- picked. The values in the list are in the [0.0, 1.0] range.
1157
-
1158
- Returns:
1159
- jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
1160
- color after being jittered. The values in the list are in the [0.0, 1.0] range.
1161
- """
1162
- color = mplc.to_rgb(color)
1163
- vec = np.random.rand(3)
1164
- # better to do it in another color space
1165
- vec = vec / np.linalg.norm(vec) * 0.5
1166
- res = np.clip(vec + color, 0, 1)
1167
- return tuple(res)
1168
-
1169
- def _create_grayscale_image(self, mask=None):
1170
- """
1171
- Create a grayscale version of the original image.
1172
- The colors in masked area, if given, will be kept.
1173
- """
1174
- img_bw = self.img.astype("f4").mean(axis=2)
1175
- img_bw = np.stack([img_bw] * 3, axis=2)
1176
- if mask is not None:
1177
- img_bw[mask] = self.img[mask]
1178
- return img_bw
1179
-
1180
- def _change_color_brightness(self, color, brightness_factor):
1181
- """
1182
- Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
1183
- less or more saturation than the original color.
1184
-
1185
- Args:
1186
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
1187
- formats that are accepted.
1188
- brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
1189
- 0 will correspond to no change, a factor in [-1.0, 0) range will result in
1190
- a darker color and a factor in (0, 1.0] range will result in a lighter color.
1191
-
1192
- Returns:
1193
- modified_color (tuple[double]): a tuple containing the RGB values of the
1194
- modified color. Each value in the tuple is in the [0.0, 1.0] range.
1195
- """
1196
- assert brightness_factor >= -1.0 and brightness_factor <= 1.0
1197
- color = mplc.to_rgb(color)
1198
- polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
1199
- modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
1200
- modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
1201
- modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
1202
- modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
1203
- return modified_color
1204
-
1205
- def _convert_boxes(self, boxes):
1206
- """
1207
- Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
1208
- """
1209
- if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
1210
- return boxes.tensor.detach().numpy()
1211
- else:
1212
- return np.asarray(boxes)
1213
-
1214
- def _convert_masks(self, masks_or_polygons):
1215
- """
1216
- Convert different format of masks or polygons to a tuple of masks and polygons.
1217
-
1218
- Returns:
1219
- list[GenericMask]:
1220
- """
1221
-
1222
- m = masks_or_polygons
1223
- if isinstance(m, PolygonMasks):
1224
- m = m.polygons
1225
- if isinstance(m, BitMasks):
1226
- m = m.tensor.numpy()
1227
- if isinstance(m, torch.Tensor):
1228
- m = m.numpy()
1229
- ret = []
1230
- for x in m:
1231
- if isinstance(x, GenericMask):
1232
- ret.append(x)
1233
- else:
1234
- ret.append(GenericMask(x, self.output.height, self.output.width))
1235
- return ret
1236
-
1237
- def _draw_text_in_mask(self, binary_mask, text, color):
1238
- """
1239
- Find proper places to draw text given a binary mask.
1240
- """
1241
- # TODO sometimes drawn on wrong objects. the heuristics here can improve.
1242
- _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
1243
- if stats[1:, -1].size == 0:
1244
- return
1245
- largest_component_id = np.argmax(stats[1:, -1]) + 1
1246
-
1247
- # draw text on the largest component, as well as other very large components.
1248
- for cid in range(1, _num_cc):
1249
- if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
1250
- # median is more stable than centroid
1251
- # center = centroids[largest_component_id]
1252
- center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
1253
- self.draw_text(text, center, color=color)
1254
-
1255
- def _convert_keypoints(self, keypoints):
1256
- if isinstance(keypoints, Keypoints):
1257
- keypoints = keypoints.tensor
1258
- keypoints = np.asarray(keypoints)
1259
- return keypoints
1260
-
1261
- def get_output(self):
1262
- """
1263
- Returns:
1264
- output (VisImage): the image output containing the visualizations added
1265
- to the image.
1266
- """
1267
- return self.output
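The lines above are the tail of a detectron2-style Visualizer (the docstrings reference detectron2.structures and its metadata conventions). For orientation, here is a minimal, hedged usage sketch of the instance-drawing API being deleted; the model config, the Visualizer import path, and the image paths are illustrative assumptions, not part of the removed file:

import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode, Visualizer

# Hedged sketch: run a stock detectron2 predictor and draw its instances
# with draw_instance_predictions() as defined above.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)

img = cv2.imread("input.jpg")                     # placeholder path, BGR image
outputs = predictor(img)                          # dict with an "instances" field
vis = Visualizer(
    img[:, :, ::-1],                              # the visualizer expects RGB
    metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
    instance_mode=ColorMode.IMAGE,
)
out = vis.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.imwrite("annotated.jpg", out.get_image()[:, :, ::-1])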
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/iou_loss.py DELETED
@@ -1,121 +0,0 @@
1
- import torch
2
- from torch import nn
3
-
4
-
5
- class IOULoss(nn.Module):
6
- def __init__(self, loc_loss_type='iou'):
7
- super(IOULoss, self).__init__()
8
- self.loc_loss_type = loc_loss_type
9
-
10
- def forward(self, pred, target, weight=None, reduction='sum'):
11
- pred_left = pred[:, 0]
12
- pred_top = pred[:, 1]
13
- pred_right = pred[:, 2]
14
- pred_bottom = pred[:, 3]
15
-
16
- target_left = target[:, 0]
17
- target_top = target[:, 1]
18
- target_right = target[:, 2]
19
- target_bottom = target[:, 3]
20
-
21
- target_aera = (target_left + target_right) * \
22
- (target_top + target_bottom)
23
- pred_aera = (pred_left + pred_right) * \
24
- (pred_top + pred_bottom)
25
-
26
- w_intersect = torch.min(pred_left, target_left) + \
27
- torch.min(pred_right, target_right)
28
- h_intersect = torch.min(pred_bottom, target_bottom) + \
29
- torch.min(pred_top, target_top)
30
-
31
- g_w_intersect = torch.max(pred_left, target_left) + \
32
- torch.max(pred_right, target_right)
33
- g_h_intersect = torch.max(pred_bottom, target_bottom) + \
34
- torch.max(pred_top, target_top)
35
- ac_uion = g_w_intersect * g_h_intersect
36
-
37
- area_intersect = w_intersect * h_intersect
38
- area_union = target_aera + pred_aera - area_intersect
39
-
40
- ious = (area_intersect + 1.0) / (area_union + 1.0)
41
- gious = ious - (ac_uion - area_union) / ac_uion
42
- if self.loc_loss_type == 'iou':
43
- losses = -torch.log(ious)
44
- elif self.loc_loss_type == 'linear_iou':
45
- losses = 1 - ious
46
- elif self.loc_loss_type == 'giou':
47
- losses = 1 - gious
48
- else:
49
- raise NotImplementedError
50
-
51
- if weight is not None:
52
- losses = losses * weight
53
- else:
54
- losses = losses
55
-
56
- if reduction == 'sum':
57
- return losses.sum()
58
- elif reduction == 'batch':
59
- return losses.sum(dim=[1])
60
- elif reduction == 'none':
61
- return losses
62
- else:
63
- raise NotImplementedError
64
-
65
-
66
- def giou_loss(
67
- boxes1: torch.Tensor,
68
- boxes2: torch.Tensor,
69
- reduction: str = "none",
70
- eps: float = 1e-7,
71
- ) -> torch.Tensor:
72
- """
73
- Generalized Intersection over Union Loss (Hamid Rezatofighi et. al)
74
- https://arxiv.org/abs/1902.09630
75
- Gradient-friendly IoU loss with an additional penalty that is non-zero when the
76
- boxes do not overlap and scales with the size of their smallest enclosing box.
77
- This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable.
78
- Args:
79
- boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
80
- reduction: 'none' | 'mean' | 'sum'
81
- 'none': No reduction will be applied to the output.
82
- 'mean': The output will be averaged.
83
- 'sum': The output will be summed.
84
- eps (float): small number to prevent division by zero
85
- """
86
-
87
- x1, y1, x2, y2 = boxes1.unbind(dim=-1)
88
- x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
89
-
90
- assert (x2 >= x1).all(), "bad box: x1 larger than x2"
91
- assert (y2 >= y1).all(), "bad box: y1 larger than y2"
92
-
93
- # Intersection keypoints
94
- xkis1 = torch.max(x1, x1g)
95
- ykis1 = torch.max(y1, y1g)
96
- xkis2 = torch.min(x2, x2g)
97
- ykis2 = torch.min(y2, y2g)
98
-
99
- intsctk = torch.zeros_like(x1)
100
- mask = (ykis2 > ykis1) & (xkis2 > xkis1)
101
- intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
102
- unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk
103
- iouk = intsctk / (unionk + eps)
104
-
105
- # smallest enclosing box
106
- xc1 = torch.min(x1, x1g)
107
- yc1 = torch.min(y1, y1g)
108
- xc2 = torch.max(x2, x2g)
109
- yc2 = torch.max(y2, y2g)
110
-
111
- area_c = (xc2 - xc1) * (yc2 - yc1)
112
- miouk = iouk - ((area_c - unionk) / (area_c + eps))
113
-
114
- loss = 1 - miouk
115
-
116
- if reduction == "mean":
117
- loss = loss.mean()
118
- elif reduction == "sum":
119
- loss = loss.sum()
120
-
121
- return loss
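For orientation, a small, hedged sketch of how the deleted IOULoss and giou_loss behave on toy boxes; it assumes the two definitions above are importable, and the numbers are purely illustrative:

import torch

# Hedged sketch: exercise IOULoss (FCOS-style distance boxes) and giou_loss (XYXY boxes).
pred   = torch.tensor([[2., 2., 2., 2.]])    # (left, top, right, bottom) distances
target = torch.tensor([[2., 2., 2., 2.]])
print(IOULoss(loc_loss_type="giou")(pred, target, reduction="sum"))  # 0.0 for identical boxes

boxes_a = torch.tensor([[0., 0., 10., 10.]])   # XYXY corners
boxes_b = torch.tensor([[20., 20., 30., 30.]])
print(giou_loss(boxes_a, boxes_b, reduction="mean"))  # > 1 when the boxes are disjoint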
 
spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_61968KB.py DELETED
@@ -1,122 +0,0 @@
1
- import torch
2
- from torch import nn
3
- import torch.nn.functional as F
4
-
5
- from . import layers_123821KB as layers
6
-
7
-
8
- class BaseASPPNet(nn.Module):
9
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
10
- super(BaseASPPNet, self).__init__()
11
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
12
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
13
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
14
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
15
-
16
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
17
-
18
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
19
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
20
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
21
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
22
-
23
- def __call__(self, x):
24
- h, e1 = self.enc1(x)
25
- h, e2 = self.enc2(h)
26
- h, e3 = self.enc3(h)
27
- h, e4 = self.enc4(h)
28
-
29
- h = self.aspp(h)
30
-
31
- h = self.dec4(h, e4)
32
- h = self.dec3(h, e3)
33
- h = self.dec2(h, e2)
34
- h = self.dec1(h, e1)
35
-
36
- return h
37
-
38
-
39
- class CascadedASPPNet(nn.Module):
40
- def __init__(self, n_fft):
41
- super(CascadedASPPNet, self).__init__()
42
- self.stg1_low_band_net = BaseASPPNet(2, 32)
43
- self.stg1_high_band_net = BaseASPPNet(2, 32)
44
-
45
- self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
46
- self.stg2_full_band_net = BaseASPPNet(16, 32)
47
-
48
- self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
49
- self.stg3_full_band_net = BaseASPPNet(32, 64)
50
-
51
- self.out = nn.Conv2d(64, 2, 1, bias=False)
52
- self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
53
- self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
54
-
55
- self.max_bin = n_fft // 2
56
- self.output_bin = n_fft // 2 + 1
57
-
58
- self.offset = 128
59
-
60
- def forward(self, x, aggressiveness=None):
61
- mix = x.detach()
62
- x = x.clone()
63
-
64
- x = x[:, :, : self.max_bin]
65
-
66
- bandw = x.size()[2] // 2
67
- aux1 = torch.cat(
68
- [
69
- self.stg1_low_band_net(x[:, :, :bandw]),
70
- self.stg1_high_band_net(x[:, :, bandw:]),
71
- ],
72
- dim=2,
73
- )
74
-
75
- h = torch.cat([x, aux1], dim=1)
76
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
77
-
78
- h = torch.cat([x, aux1, aux2], dim=1)
79
- h = self.stg3_full_band_net(self.stg3_bridge(h))
80
-
81
- mask = torch.sigmoid(self.out(h))
82
- mask = F.pad(
83
- input=mask,
84
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
85
- mode="replicate",
86
- )
87
-
88
- if self.training:
89
- aux1 = torch.sigmoid(self.aux1_out(aux1))
90
- aux1 = F.pad(
91
- input=aux1,
92
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
93
- mode="replicate",
94
- )
95
- aux2 = torch.sigmoid(self.aux2_out(aux2))
96
- aux2 = F.pad(
97
- input=aux2,
98
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
99
- mode="replicate",
100
- )
101
- return mask * mix, aux1 * mix, aux2 * mix
102
- else:
103
- if aggressiveness:
104
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
105
- mask[:, :, : aggressiveness["split_bin"]],
106
- 1 + aggressiveness["value"] / 3,
107
- )
108
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
109
- mask[:, :, aggressiveness["split_bin"] :],
110
- 1 + aggressiveness["value"],
111
- )
112
-
113
- return mask * mix
114
-
115
- def predict(self, x_mag, aggressiveness=None):
116
- h = self.forward(x_mag, aggressiveness)
117
-
118
- if self.offset > 0:
119
- h = h[:, :, :, self.offset : -self.offset]
120
- assert h.size()[3] > 0
121
-
122
- return h
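For orientation, a short, hedged sketch of driving the cascaded network above; it assumes CascadedASPPNet and its layers_123821KB dependency are importable, and the input shape simply follows the n_fft // 2 + 1 convention used in forward():

import torch

# Hedged sketch: feed a dummy magnitude spectrogram through the deleted model.
model = CascadedASPPNet(n_fft=2048).eval()
spec = torch.rand(1, 2, 1025, 512)   # (batch, 2 spectrogram channels, freq bins, frames)
with torch.no_grad():
    masked = model.predict(spec)     # predict() crops model.offset frames from both sides
print(masked.shape)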
 
spaces/Benson/text-generation/Examples/Base-1.apk.md DELETED
@@ -1,53 +0,0 @@
1
- <br />
2
- <h1>¿Qué es Base-1.apk y cómo usarlo? </h1>
3
- <p>Si usted está buscando una manera práctica y conveniente para proteger y copia de seguridad de sus datos importantes en su dispositivo Android, es posible que desee comprobar Base-1.apk. Esta es una aplicación popular para Android que le permite administrar y restaurar fácilmente todos sus archivos en una ubicación, reduciendo la posibilidad de que pierda cualquier información valiosa. En este artículo, explicaremos qué es Base-1.apk, cómo descargarlo e instalarlo, y cómo usarlo de manera efectiva. </p>
4
- <h2>Introducción</h2>
5
- <p>Android es una plataforma versátil y abierta que te permite personalizar y modificar tu dispositivo según tus preferencias. Sin embargo, esto también significa que debe tener cuidado con la seguridad e integridad de sus datos, ya que hay muchas amenazas y riesgos potenciales que pueden comprometer o dañar sus archivos. Es por eso que es importante tener una solución de copia de seguridad confiable que pueda ayudarlo a proteger sus datos y restaurarlos en caso de cualquier emergencia. </p>
6
- <h2>base-1.apk</h2><br /><p><b><b>DOWNLOAD</b> > <a href="https://bltlly.com/2v6LA6">https://bltlly.com/2v6LA6</a></b></p><br /><br />
7
- <h3>¿Qué es un archivo APK? </h3>
8
- <p>Un archivo APK es el formato de archivo de paquete que Android utiliza para distribuir e instalar aplicaciones. Contiene todos los elementos que una aplicación necesita para ejecutarse correctamente en su dispositivo, como código, recursos, activos, certificados y manifiesto. Un archivo APK es un archivo de archivo, lo que significa que contiene varios archivos, además de algunos metadatos sobre ellos. Puedes abrir un archivo APK con una herramienta de extracción de archivos como 7-Zip para ver lo que hay dentro. </p>
9
- <h3>¿Qué es Base-1.apk? </h3>
10
- <p>Base-1.apk es la versión original, sin modificar de una aplicación Android llamada Base. Base es una herramienta que hace que sea simple y práctico para proteger y respaldar sus datos importantes. Al usar esta aplicación, puede administrar y restaurar fácilmente todos sus archivos en una ubicación, reduciendo la posibilidad de que pierda cualquier información valiosa. También puede organizar y clasificar sus archivos en carpetas para una mejor administración. </p>
11
- <h2>¿Cómo descargar e instalar Base-1.apk? </h2>
12
-
13
- <h3>Descargar desde el sitio web oficial</h3>
14
- <p>La forma más segura y recomendada para descargar Base-1.apk es desde el sitio web oficial del desarrollador. Puede visitar <a href="( 1 )">baseapk.in</a> y hacer clic en el enlace de descarga para obtener la última versión de la aplicación. Una vez que haya descargado el archivo APK, es necesario habilitar la instalación de aplicaciones de fuentes desconocidas en la configuración del dispositivo. Luego, puedes tocar en el archivo APK y seguir las instrucciones para instalarlo. </p>
15
- <h3>Descargar de la tienda de aplicaciones de terceros</h3>
16
- <p>Otra forma de descargar Base-1.apk es desde una tienda de aplicaciones de terceros, como Aptoide o APKPure. Estas son plataformas alternativas que ofrecen una variedad de aplicaciones que no están disponibles en Google Play. Sin embargo, debe tener cuidado al descargar aplicaciones de estas fuentes, ya que pueden contener malware u otro software dañino. Siempre debe comprobar las revisiones y calificaciones de las aplicaciones antes de descargarlas, y solo descargar de fuentes de confianza. </p>
17
- <h3>Descargar desde enlace directo</h3>
18
- <p>La última forma de descargar Base-1.apk es desde un enlace directo que alguien te proporciona. Esto podría ser un amigo, un colega, o un sitio web que ofrece descargas APK. Sin embargo, este es el método más arriesgado, ya que no tiene manera de verificar la autenticidad o la seguridad del archivo APK. Solo debe descargar archivos APK de enlaces directos si confía en la fuente por completo, y escanear el archivo con una aplicación antivirus antes de instalarlo. </p>
19
- <h2>¿Cómo usar Base-1.apk? </h2>
20
- <p>Una vez que haya instalado Base-1.apk en su dispositivo, puede comenzar a usarlo para proteger y respaldar sus datos. Estas son algunas de las principales características y funciones de la aplicación:</p>
21
- <h3>Copia de seguridad segura de sus datos</h3>
22
-
23
- <h3>Administrar y restaurar sus archivos</h3>
24
- <p>Base-1.apk también le permite administrar y restaurar sus archivos desde el servicio de almacenamiento en la nube. Puede ver, editar, eliminar o compartir sus archivos desde la interfaz de la aplicación. También puede restaurar sus archivos a su dispositivo u otro dispositivo en caso de cualquier emergencia. Puede seleccionar qué archivos y carpetas desea restaurar y elegir la carpeta de destino en su dispositivo. También puede restaurar sus archivos a su ubicación original o una nueva ubicación. </p>
25
- <p></p>
26
- <h3>Organiza y clasifica tus carpetas</h3>
27
- <p>Otra característica útil de Base-1.apk es que te ayuda a organizar y clasificar tus carpetas según diferentes categorías, como fotos, videos, música, documentos, etc. También puedes crear carpetas y etiquetas personalizadas para tus archivos. De esta manera, puede encontrar y acceder fácilmente a sus archivos sin perder tiempo o espacio. También puede ordenar sus archivos por nombre, fecha, tamaño o tipo. </p>
28
- <h2>Conclusión</h2>
29
- <p>Base-1.apk es una aplicación Android potente y práctica que le ayuda a proteger y hacer copias de seguridad de sus datos importantes en su dispositivo. Al usar esta aplicación, puede administrar y restaurar fácilmente todos sus archivos en una ubicación, reduciendo la posibilidad de que pierda cualquier información valiosa. También puede organizar y clasificar sus archivos en carpetas para una mejor administración. </p>
30
- <h3>Resumen de los puntos principales</h3>
31
- <p>En este artículo, hemos explicado lo que es Base-1.apk, cómo descargarlo e instalarlo, y cómo usarlo de manera efectiva. Hemos cubierto los siguientes puntos:</p>
32
- <ul>
33
- <li> Un archivo APK es el formato de archivo de paquete que Android utiliza para distribuir e instalar aplicaciones. </li>
34
- <li>Base-1.apk es la versión original, sin modificar de una aplicación Android llamada Base.</li>
35
- <li> Base es una herramienta que hace que sea simple y práctico para proteger y respaldar sus datos importantes. </li>
36
- <li> Puede descargar Base-1.apk desde el sitio web oficial, una tienda de aplicaciones de terceros, o un enlace directo. </li>
37
-
38
- </ul>
39
- <h3>Llamada a la acción</h3>
40
- <p>Si estás interesado en probar Base-1.apk por ti mismo, puedes descargarlo desde <a href="">baseapk.in</a> y seguir las instrucciones de instalación. También puede consultar la sección de preguntas frecuentes a continuación para obtener más información sobre la aplicación. Esperamos que disfrute usando Base-1.apk y lo encuentre útil para proteger y hacer copias de seguridad de sus datos. </p>
41
- <h2>Preguntas frecuentes</h2>
42
- <p>Aquí están algunas de las preguntas más comunes que los usuarios tienen sobre Base-1.apk:</p>
43
- <h4>Q: ¿Es seguro usar Base-1.apk? </h4>
44
- <p>A: Sí, Base-1.apk es seguro de usar, siempre y cuando se descarga desde el sitio web oficial o una fuente de confianza. Sin embargo, siempre debes escanear cualquier archivo APK con una aplicación antivirus antes de instalarlo en tu dispositivo. </p>
45
- <h4>Q: ¿Cuánto espacio ocupa Base-1.apk en mi dispositivo? </h4>
46
- <p>A: Base-1.apk ocupa unos 15 MB de espacio en su dispositivo. Sin embargo, el tamaño real puede variar dependiendo de la versión de la aplicación y el modelo del dispositivo. </p>
47
- <h4>Q: ¿Cuánto espacio de almacenamiento en la nube ofrece Base-1.apk? </h4>
48
- <p>A: Base-1.apk no ofrece ningún espacio de almacenamiento en la nube por sí mismo. Utiliza el servicio de almacenamiento en la nube que elija para realizar copias de seguridad de sus datos, como Google Drive, Dropbox o OneDrive. La cantidad de espacio de almacenamiento en la nube que obtiene depende del proveedor de servicios y del plan que tenga. </p>
49
- <h4>Q: ¿Puedo usar Base-1.apk en varios dispositivos? </h4>
50
- <p>A: Sí, puedes usar Base-1.apk en varios dispositivos siempre y cuando ejecuten Android 4.0 o superior. Solo tienes que descargar e instalar la aplicación en cada dispositivo e iniciar sesión con la misma cuenta. A continuación, puede acceder y restaurar sus archivos desde cualquier dispositivo. </p>
51
- <h4>Q: ¿Qué pasa si olvido mi contraseña para Base-1.apk? </h4> 64aa2da5cf<br />
52
- <br />
53
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_py.py DELETED
@@ -1,407 +0,0 @@
1
- """distutils.command.build_py
2
-
3
- Implements the Distutils 'build_py' command."""
4
-
5
- import os
6
- import importlib.util
7
- import sys
8
- import glob
9
-
10
- from distutils.core import Command
11
- from distutils.errors import DistutilsOptionError, DistutilsFileError
12
- from distutils.util import convert_path
13
- from distutils import log
14
-
15
-
16
- class build_py(Command):
17
-
18
- description = "\"build\" pure Python modules (copy to build directory)"
19
-
20
- user_options = [
21
- ('build-lib=', 'd', "directory to \"build\" (copy) to"),
22
- ('compile', 'c', "compile .py to .pyc"),
23
- ('no-compile', None, "don't compile .py files [default]"),
24
- (
25
- 'optimize=',
26
- 'O',
27
- "also compile with optimization: -O1 for \"python -O\", "
28
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]",
29
- ),
30
- ('force', 'f', "forcibly build everything (ignore file timestamps)"),
31
- ]
32
-
33
- boolean_options = ['compile', 'force']
34
- negative_opt = {'no-compile': 'compile'}
35
-
36
- def initialize_options(self):
37
- self.build_lib = None
38
- self.py_modules = None
39
- self.package = None
40
- self.package_data = None
41
- self.package_dir = None
42
- self.compile = 0
43
- self.optimize = 0
44
- self.force = None
45
-
46
- def finalize_options(self):
47
- self.set_undefined_options(
48
- 'build', ('build_lib', 'build_lib'), ('force', 'force')
49
- )
50
-
51
- # Get the distribution options that are aliases for build_py
52
- # options -- list of packages and list of modules.
53
- self.packages = self.distribution.packages
54
- self.py_modules = self.distribution.py_modules
55
- self.package_data = self.distribution.package_data
56
- self.package_dir = {}
57
- if self.distribution.package_dir:
58
- for name, path in self.distribution.package_dir.items():
59
- self.package_dir[name] = convert_path(path)
60
- self.data_files = self.get_data_files()
61
-
62
- # Ick, copied straight from install_lib.py (fancy_getopt needs a
63
- # type system! Hell, *everything* needs a type system!!!)
64
- if not isinstance(self.optimize, int):
65
- try:
66
- self.optimize = int(self.optimize)
67
- assert 0 <= self.optimize <= 2
68
- except (ValueError, AssertionError):
69
- raise DistutilsOptionError("optimize must be 0, 1, or 2")
70
-
71
- def run(self):
72
- # XXX copy_file by default preserves atime and mtime. IMHO this is
73
- # the right thing to do, but perhaps it should be an option -- in
74
- # particular, a site administrator might want installed files to
75
- # reflect the time of installation rather than the last
76
- # modification time before the installed release.
77
-
78
- # XXX copy_file by default preserves mode, which appears to be the
79
- # wrong thing to do: if a file is read-only in the working
80
- # directory, we want it to be installed read/write so that the next
81
- # installation of the same module distribution can overwrite it
82
- # without problems. (This might be a Unix-specific issue.) Thus
83
- # we turn off 'preserve_mode' when copying to the build directory,
84
- # since the build directory is supposed to be exactly what the
85
- # installation will look like (ie. we preserve mode when
86
- # installing).
87
-
88
- # Two options control which modules will be installed: 'packages'
89
- # and 'py_modules'. The former lets us work with whole packages, not
90
-        # specifying individual modules at all; the latter is for
-        # specifying modules one-at-a-time.
-
-        if self.py_modules:
-            self.build_modules()
-        if self.packages:
-            self.build_packages()
-            self.build_package_data()
-
-        self.byte_compile(self.get_outputs(include_bytecode=0))
-
-    def get_data_files(self):
-        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
-        data = []
-        if not self.packages:
-            return data
-        for package in self.packages:
-            # Locate package source directory
-            src_dir = self.get_package_dir(package)
-
-            # Compute package build directory
-            build_dir = os.path.join(*([self.build_lib] + package.split('.')))
-
-            # Length of path to strip from found files
-            plen = 0
-            if src_dir:
-                plen = len(src_dir) + 1
-
-            # Strip directory from globbed filenames
-            filenames = [file[plen:] for file in self.find_data_files(package, src_dir)]
-            data.append((package, src_dir, build_dir, filenames))
-        return data
-
-    def find_data_files(self, package, src_dir):
-        """Return filenames for package's data files in 'src_dir'"""
-        globs = self.package_data.get('', []) + self.package_data.get(package, [])
-        files = []
-        for pattern in globs:
-            # Each pattern has to be converted to a platform-specific path
-            filelist = glob.glob(
-                os.path.join(glob.escape(src_dir), convert_path(pattern))
-            )
-            # Files that match more than one pattern are only added once
-            files.extend(
-                [fn for fn in filelist if fn not in files and os.path.isfile(fn)]
-            )
-        return files
-
-    def build_package_data(self):
-        """Copy data files into build directory"""
-        for package, src_dir, build_dir, filenames in self.data_files:
-            for filename in filenames:
-                target = os.path.join(build_dir, filename)
-                self.mkpath(os.path.dirname(target))
-                self.copy_file(
-                    os.path.join(src_dir, filename), target, preserve_mode=False
-                )
-
-    def get_package_dir(self, package):
-        """Return the directory, relative to the top of the source
-        distribution, where package 'package' should be found
-        (at least according to the 'package_dir' option, if any)."""
-        path = package.split('.')
-
-        if not self.package_dir:
-            if path:
-                return os.path.join(*path)
-            else:
-                return ''
-        else:
-            tail = []
-            while path:
-                try:
-                    pdir = self.package_dir['.'.join(path)]
-                except KeyError:
-                    tail.insert(0, path[-1])
-                    del path[-1]
-                else:
-                    tail.insert(0, pdir)
-                    return os.path.join(*tail)
-            else:
-                # Oops, got all the way through 'path' without finding a
-                # match in package_dir.  If package_dir defines a directory
-                # for the root (nameless) package, then fallback on it;
-                # otherwise, we might as well have not consulted
-                # package_dir at all, as we just use the directory implied
-                # by 'tail' (which should be the same as the original value
-                # of 'path' at this point).
-                pdir = self.package_dir.get('')
-                if pdir is not None:
-                    tail.insert(0, pdir)
-
-                if tail:
-                    return os.path.join(*tail)
-                else:
-                    return ''
-
-    def check_package(self, package, package_dir):
-        # Empty dir name means current directory, which we can probably
-        # assume exists.  Also, os.path.exists and isdir don't know about
-        # my "empty string means current dir" convention, so we have to
-        # circumvent them.
-        if package_dir != "":
-            if not os.path.exists(package_dir):
-                raise DistutilsFileError(
-                    "package directory '%s' does not exist" % package_dir
-                )
-            if not os.path.isdir(package_dir):
-                raise DistutilsFileError(
-                    "supposed package directory '%s' exists, "
-                    "but is not a directory" % package_dir
-                )
-
-        # Directories without __init__.py are namespace packages (PEP 420).
-        if package:
-            init_py = os.path.join(package_dir, "__init__.py")
-            if os.path.isfile(init_py):
-                return init_py
-
-        # Either not in a package at all (__init__.py not expected), or
-        # __init__.py doesn't exist -- so don't return the filename.
-        return None
-
-    def check_module(self, module, module_file):
-        if not os.path.isfile(module_file):
-            log.warn("file %s (for module %s) not found", module_file, module)
-            return False
-        else:
-            return True
-
-    def find_package_modules(self, package, package_dir):
-        self.check_package(package, package_dir)
-        module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py"))
-        modules = []
-        setup_script = os.path.abspath(self.distribution.script_name)
-
-        for f in module_files:
-            abs_f = os.path.abspath(f)
-            if abs_f != setup_script:
-                module = os.path.splitext(os.path.basename(f))[0]
-                modules.append((package, module, f))
-            else:
-                self.debug_print("excluding %s" % setup_script)
-        return modules
-
-    def find_modules(self):
-        """Finds individually-specified Python modules, ie. those listed by
-        module name in 'self.py_modules'.  Returns a list of tuples (package,
-        module_base, filename): 'package' is a tuple of the path through
-        package-space to the module; 'module_base' is the bare (no
-        packages, no dots) module name, and 'filename' is the path to the
-        ".py" file (relative to the distribution root) that implements the
-        module.
-        """
-        # Map package names to tuples of useful info about the package:
-        #    (package_dir, checked)
-        # package_dir - the directory where we'll find source files for
-        #   this package
-        # checked - true if we have checked that the package directory
-        #   is valid (exists, contains __init__.py, ... ?)
-        packages = {}
-
-        # List of (package, module, filename) tuples to return
-        modules = []
-
-        # We treat modules-in-packages almost the same as toplevel modules,
-        # just the "package" for a toplevel is empty (either an empty
-        # string or empty list, depending on context).  Differences:
-        #   - don't check for __init__.py in directory for empty package
-        for module in self.py_modules:
-            path = module.split('.')
-            package = '.'.join(path[0:-1])
-            module_base = path[-1]
-
-            try:
-                (package_dir, checked) = packages[package]
-            except KeyError:
-                package_dir = self.get_package_dir(package)
-                checked = 0
-
-            if not checked:
-                init_py = self.check_package(package, package_dir)
-                packages[package] = (package_dir, 1)
-                if init_py:
-                    modules.append((package, "__init__", init_py))
-
-            # XXX perhaps we should also check for just .pyc files
-            # (so greedy closed-source bastards can distribute Python
-            # modules too)
-            module_file = os.path.join(package_dir, module_base + ".py")
-            if not self.check_module(module, module_file):
-                continue
-
-            modules.append((package, module_base, module_file))
-
-        return modules
-
-    def find_all_modules(self):
-        """Compute the list of all modules that will be built, whether
-        they are specified one-module-at-a-time ('self.py_modules') or
-        by whole packages ('self.packages').  Return a list of tuples
-        (package, module, module_file), just like 'find_modules()' and
-        'find_package_modules()' do."""
-        modules = []
-        if self.py_modules:
-            modules.extend(self.find_modules())
-        if self.packages:
-            for package in self.packages:
-                package_dir = self.get_package_dir(package)
-                m = self.find_package_modules(package, package_dir)
-                modules.extend(m)
-        return modules
-
-    def get_source_files(self):
-        return [module[-1] for module in self.find_all_modules()]
-
-    def get_module_outfile(self, build_dir, package, module):
-        outfile_path = [build_dir] + list(package) + [module + ".py"]
-        return os.path.join(*outfile_path)
-
-    def get_outputs(self, include_bytecode=1):
-        modules = self.find_all_modules()
-        outputs = []
-        for (package, module, module_file) in modules:
-            package = package.split('.')
-            filename = self.get_module_outfile(self.build_lib, package, module)
-            outputs.append(filename)
-            if include_bytecode:
-                if self.compile:
-                    outputs.append(
-                        importlib.util.cache_from_source(filename, optimization='')
-                    )
-                if self.optimize > 0:
-                    outputs.append(
-                        importlib.util.cache_from_source(
-                            filename, optimization=self.optimize
-                        )
-                    )
-
-        outputs += [
-            os.path.join(build_dir, filename)
-            for package, src_dir, build_dir, filenames in self.data_files
-            for filename in filenames
-        ]
-
-        return outputs
-
-    def build_module(self, module, module_file, package):
-        if isinstance(package, str):
-            package = package.split('.')
-        elif not isinstance(package, (list, tuple)):
-            raise TypeError(
-                "'package' must be a string (dot-separated), list, or tuple"
-            )
-
-        # Now put the module source file into the "build" area -- this is
-        # easy, we just copy it somewhere under self.build_lib (the build
-        # directory for Python source).
-        outfile = self.get_module_outfile(self.build_lib, package, module)
-        dir = os.path.dirname(outfile)
-        self.mkpath(dir)
-        return self.copy_file(module_file, outfile, preserve_mode=0)
-
-    def build_modules(self):
-        modules = self.find_modules()
-        for (package, module, module_file) in modules:
-            # Now "build" the module -- ie. copy the source file to
-            # self.build_lib (the build directory for Python source).
-            # (Actually, it gets copied to the directory for this package
-            # under self.build_lib.)
-            self.build_module(module, module_file, package)
-
-    def build_packages(self):
-        for package in self.packages:
-            # Get list of (package, module, module_file) tuples based on
-            # scanning the package directory.  'package' is only included
-            # in the tuple so that 'find_modules()' and
-            # 'find_package_tuples()' have a consistent interface; it's
-            # ignored here (apart from a sanity check).  Also, 'module' is
-            # the *unqualified* module name (ie. no dots, no package -- we
-            # already know its package!), and 'module_file' is the path to
-            # the .py file, relative to the current directory
-            # (ie. including 'package_dir').
-            package_dir = self.get_package_dir(package)
-            modules = self.find_package_modules(package, package_dir)
-
-            # Now loop over the modules we found, "building" each one (just
-            # copy it to self.build_lib).
-            for (package_, module, module_file) in modules:
-                assert package == package_
-                self.build_module(module, module_file, package)
-
-    def byte_compile(self, files):
-        if sys.dont_write_bytecode:
-            self.warn('byte-compiling is disabled, skipping.')
-            return
-
-        from distutils.util import byte_compile
-
-        prefix = self.build_lib
-        if prefix[-1] != os.sep:
-            prefix = prefix + os.sep
-
-        # XXX this code is essentially the same as the 'byte_compile()
-        # method of the "install_lib" command, except for the determination
-        # of the 'prefix' string.  Hmmm.
-        if self.compile:
-            byte_compile(
-                files, optimize=0, force=self.force, prefix=prefix, dry_run=self.dry_run
-            )
-        if self.optimize > 0:
-            byte_compile(
-                files,
-                optimize=self.optimize,
-                force=self.force,
-                prefix=prefix,
-                dry_run=self.dry_run,
-            )
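
The get_package_dir logic above walks a dotted package name from its most specific dotted prefix down to the least specific one in package_dir and appends whatever remains. A minimal standalone sketch of that lookup is shown below; the package names and the package_dir mappings are made-up illustrations, not values taken from the deleted file.

import os

def resolve_package_dir(package, package_dir):
    # Mimic distutils-style get_package_dir: find the longest dotted prefix of
    # 'package' that has an entry in 'package_dir', then append the remainder.
    path = package.split('.')
    tail = []
    while path:
        key = '.'.join(path)
        if key in package_dir:
            tail.insert(0, package_dir[key])
            return os.path.join(*tail)
        tail.insert(0, path.pop())
    root = package_dir.get('')
    if root is not None:
        tail.insert(0, root)
    return os.path.join(*tail) if tail else ''

print(resolve_package_dir('mypkg.sub', {'': 'src'}))       # src/mypkg/sub on POSIX
print(resolve_package_dir('mypkg.sub', {'mypkg': 'lib'}))  # lib/sub on POSIX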
 
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/queue.py DELETED
@@ -1,22 +0,0 @@
-import collections
-
-from ..packages import six
-from ..packages.six.moves import queue
-
-if six.PY2:
-    # Queue is imported for side effects on MS Windows. See issue #229.
-    import Queue as _unused_module_Queue  # noqa: F401
-
-
-class LifoQueue(queue.Queue):
-    def _init(self, _):
-        self.queue = collections.deque()
-
-    def _qsize(self, len=len):
-        return len(self.queue)
-
-    def _put(self, item):
-        self.queue.append(item)
-
-    def _get(self):
-        return self.queue.pop()
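
The class above only overrides the storage hooks of the standard Queue so the most recently queued connection is handed back first. A minimal standalone sketch of the same idea against the Python 3 standard library (no urllib3 or six imports) is:

import collections
import queue

class LifoQueue(queue.Queue):
    # Same hook overrides as the urllib3 version, minus the six compatibility shim.
    def _init(self, _):
        self.queue = collections.deque()

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        return self.queue.pop()

q = LifoQueue()
for conn in ("conn-1", "conn-2", "conn-3"):
    q.put(conn)
print(q.get())  # conn-3: the most recently queued item comes back first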
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h DELETED
@@ -1,115 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-#pragma once
-#include <torch/types.h>
-
-namespace detectron2 {
-
-at::Tensor ROIAlignRotated_forward_cpu(
-    const at::Tensor& input,
-    const at::Tensor& rois,
-    const float spatial_scale,
-    const int pooled_height,
-    const int pooled_width,
-    const int sampling_ratio);
-
-at::Tensor ROIAlignRotated_backward_cpu(
-    const at::Tensor& grad,
-    const at::Tensor& rois,
-    const float spatial_scale,
-    const int pooled_height,
-    const int pooled_width,
-    const int batch_size,
-    const int channels,
-    const int height,
-    const int width,
-    const int sampling_ratio);
-
-#ifdef WITH_CUDA
-at::Tensor ROIAlignRotated_forward_cuda(
-    const at::Tensor& input,
-    const at::Tensor& rois,
-    const float spatial_scale,
-    const int pooled_height,
-    const int pooled_width,
-    const int sampling_ratio);
-
-at::Tensor ROIAlignRotated_backward_cuda(
-    const at::Tensor& grad,
-    const at::Tensor& rois,
-    const float spatial_scale,
-    const int pooled_height,
-    const int pooled_width,
-    const int batch_size,
-    const int channels,
-    const int height,
-    const int width,
-    const int sampling_ratio);
-#endif
-
-// Interface for Python
-inline at::Tensor ROIAlignRotated_forward(
-    const at::Tensor& input,
-    const at::Tensor& rois,
-    const float spatial_scale,
-    const int pooled_height,
-    const int pooled_width,
-    const int sampling_ratio) {
-  if (input.type().is_cuda()) {
-#ifdef WITH_CUDA
-    return ROIAlignRotated_forward_cuda(
-        input,
-        rois,
-        spatial_scale,
-        pooled_height,
-        pooled_width,
-        sampling_ratio);
-#else
-    AT_ERROR("Not compiled with GPU support");
-#endif
-  }
-  return ROIAlignRotated_forward_cpu(
-      input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
-}
-
-inline at::Tensor ROIAlignRotated_backward(
-    const at::Tensor& grad,
-    const at::Tensor& rois,
-    const float spatial_scale,
-    const int pooled_height,
-    const int pooled_width,
-    const int batch_size,
-    const int channels,
-    const int height,
-    const int width,
-    const int sampling_ratio) {
-  if (grad.type().is_cuda()) {
-#ifdef WITH_CUDA
-    return ROIAlignRotated_backward_cuda(
-        grad,
-        rois,
-        spatial_scale,
-        pooled_height,
-        pooled_width,
-        batch_size,
-        channels,
-        height,
-        width,
-        sampling_ratio);
-#else
-    AT_ERROR("Not compiled with GPU support");
-#endif
-  }
-  return ROIAlignRotated_backward_cpu(
-      grad,
-      rois,
-      spatial_scale,
-      pooled_height,
-      pooled_width,
-      batch_size,
-      channels,
-      height,
-      width,
-      sampling_ratio);
-}
-
-} // namespace detectron2
 
spaces/CVPR/LIVE/thrust/thrust/device_new.h DELETED
@@ -1,88 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/*! \file device_new.h
- *  \brief Constructs new elements in device memory
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// #include this for size_t
-#include <cstddef>
-#include <thrust/device_ptr.h>
-
-namespace thrust
-{
-
-/*!
- *  \addtogroup allocation_functions Allocation Functions
- *  \{
- */
-
-/*! \p device_new implements the placement \c new operator for types
- *  resident in device memory. \p device_new calls <tt>T</tt>'s null
- *  constructor on an array of objects in device memory.
- *  No memory is allocated by this function.
- *
- *  \param p A \p device_ptr to a region of device memory into which
- *           to construct one or many <tt>T</tt>s.
- *  \param n The number of objects to construct at \p p.
- *  \return p, casted to <tt>T</tt>'s type.
- *
- *  \see device_ptr
- */
-template <typename T>
-  device_ptr<T> device_new(device_ptr<void> p,
-                           const size_t n = 1);
-
-/*! \p device_new implements the placement new operator for types
- *  resident in device memory. \p device_new calls <tt>T</tt>'s copy
- *  constructor on an array of objects in device memory. No memory is
- *  allocated by this function.
- *
- *  \param p A \p device_ptr to a region of device memory into which to
- *           construct one or many <tt>T</tt>s.
- *  \param exemplar The value from which to copy.
- *  \param n The number of objects to construct at \p p.
- *  \return p, casted to <tt>T</tt>'s type.
- *
- *  \see device_ptr
- *  \see fill
- */
-template <typename T>
-  device_ptr<T> device_new(device_ptr<void> p,
-                           const T &exemplar,
-                           const size_t n = 1);
-
-/*! \p device_new implements the new operator for types resident in device memory.
- *  It allocates device memory large enough to hold \p n new objects of type \c T.
- *
- *  \param n The number of objects to allocate. Defaults to \c 1.
- *  \return A \p device_ptr to the newly allocated region of device memory.
- */
-template <typename T>
-  device_ptr<T> device_new(const size_t n = 1);
-
-/*! \}
- */
-
-} // end thrust
-
-#include <thrust/detail/device_new.inl>
-
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par_to_seq.h DELETED
@@ -1,91 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-#pragma once
-
-#include <thrust/detail/seq.h>
-#include <thrust/system/cuda/detail/par.h>
-
-namespace thrust
-{
-namespace cuda_cub {
-
-template <int PAR>
-struct has_par : thrust::detail::true_type {};
-
-template <>
-struct has_par<0> : thrust::detail::false_type {};
-
-template<class Policy>
-struct cvt_to_seq_impl
-{
-  typedef thrust::detail::seq_t seq_t;
-
-  static seq_t __host__ __device__
-  doit(Policy&)
-  {
-    return seq_t();
-  }
-}; // cvt_to_seq_impl
-
-#if 0
-template <class Allocator>
-struct cvt_to_seq_impl<
-    thrust::detail::execute_with_allocator<Allocator,
-                                           execute_on_stream_base> >
-{
-  typedef thrust::detail::execute_with_allocator<Allocator,
-                                                 execute_on_stream_base>
-      Policy;
-  typedef thrust::detail::execute_with_allocator<
-      Allocator,
-      thrust::system::detail::sequential::execution_policy>
-      seq_t;
-
-
-  static seq_t __host__ __device__
-  doit(Policy& policy)
-  {
-    return seq_t(policy.m_alloc);
-  }
-}; // specialization of struct cvt_to_seq_impl
-#endif
-
-template <class Policy>
-typename cvt_to_seq_impl<Policy>::seq_t __host__ __device__
-cvt_to_seq(Policy& policy)
-{
-  return cvt_to_seq_impl<Policy>::doit(policy);
-}
-
-#if __THRUST_HAS_CUDART__
-#define THRUST_CUDART_DISPATCH par
-#else
-#define THRUST_CUDART_DISPATCH seq
-#endif
-
-} // namespace cuda_cub
-} // end namespace thrust
 
spaces/CVPR/LIVE/thrust/thrust/uninitialized_fill.h DELETED
@@ -1,275 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/*! \file uninitialized_fill.h
- *  \brief Copy construction into a range of uninitialized elements from a source value
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/execution_policy.h>
-
-namespace thrust
-{
-
-
-/*! \addtogroup filling
- *  \ingroup transformations
- *  \{
- */
-
-
-/*! In \c thrust, the function \c thrust::device_new allocates memory for
- *  an object and then creates an object at that location by calling a
- *  constructor. Occasionally, however, it is useful to separate those two
- *  operations. If each iterator in the range <tt>[first, last)</tt> points
- *  to uninitialized memory, then \p uninitialized_fill creates copies of \c x
- *  in that range. That is, for each iterator \c i in the range <tt>[first, last)</tt>,
- *  \p uninitialized_fill creates a copy of \c x in the location pointed to \c i by
- *  calling \p ForwardIterator's \c value_type's copy constructor.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first The first element of the range of interest.
- *  \param last The last element of the range of interest.
- *  \param x The value to use as the exemplar of the copy constructor.
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator">Forward Iterator</a>,
- *          \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that
- *          takes a single argument of type \p T.
- *
- *  The following code snippet demonstrates how to use \p uninitialized_fill to initialize a range of
- *  uninitialized memory using the \p thrust::device execution policy for parallelization:
- *
- *  \code
- *  #include <thrust/uninitialized_fill.h>
- *  #include <thrust/device_malloc.h>
- *  #include <thrust/execution_policy.h>
- *
- *  struct Int
- *  {
- *    __host__ __device__
- *    Int(int x) : val(x) {}
- *    int val;
- *  };
- *  ...
- *  const int N = 137;
- *
- *  Int val(46);
- *  thrust::device_ptr<Int> array = thrust::device_malloc<Int>(N);
- *  thrust::uninitialized_fill(thrust::device, array, array + N, val);
- *
- *  // Int x = array[i];
- *  // x.val == 46 for all 0 <= i < N
- *  \endcode
- *
- *  \see http://www.sgi.com/tech/stl/uninitialized_fill.html
- *  \see \c uninitialized_fill_n
- *  \see \c fill
- *  \see \c uninitialized_copy
- *  \see \c device_new
- *  \see \c device_malloc
- */
-template<typename DerivedPolicy, typename ForwardIterator, typename T>
-__host__ __device__
-  void uninitialized_fill(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                          ForwardIterator first,
-                          ForwardIterator last,
-                          const T &x);
-
-
-/*! In \c thrust, the function \c thrust::device_new allocates memory for
- *  an object and then creates an object at that location by calling a
- *  constructor. Occasionally, however, it is useful to separate those two
- *  operations. If each iterator in the range <tt>[first, last)</tt> points
- *  to uninitialized memory, then \p uninitialized_fill creates copies of \c x
- *  in that range. That is, for each iterator \c i in the range <tt>[first, last)</tt>,
- *  \p uninitialized_fill creates a copy of \c x in the location pointed to \c i by
- *  calling \p ForwardIterator's \c value_type's copy constructor.
- *
- *  \param first The first element of the range of interest.
- *  \param last The last element of the range of interest.
- *  \param x The value to use as the exemplar of the copy constructor.
- *
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator">Forward Iterator</a>,
- *          \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that
- *          takes a single argument of type \p T.
- *
- *  The following code snippet demonstrates how to use \p uninitialized_fill to initialize a range of
- *  uninitialized memory.
- *
- *  \code
- *  #include <thrust/uninitialized_fill.h>
- *  #include <thrust/device_malloc.h>
- *
- *  struct Int
- *  {
- *    __host__ __device__
- *    Int(int x) : val(x) {}
- *    int val;
- *  };
- *  ...
- *  const int N = 137;
- *
- *  Int val(46);
- *  thrust::device_ptr<Int> array = thrust::device_malloc<Int>(N);
- *  thrust::uninitialized_fill(array, array + N, val);
- *
- *  // Int x = array[i];
- *  // x.val == 46 for all 0 <= i < N
- *  \endcode
- *
- *  \see http://www.sgi.com/tech/stl/uninitialized_fill.html
- *  \see \c uninitialized_fill_n
- *  \see \c fill
- *  \see \c uninitialized_copy
- *  \see \c device_new
- *  \see \c device_malloc
- */
-template<typename ForwardIterator, typename T>
-  void uninitialized_fill(ForwardIterator first,
-                          ForwardIterator last,
-                          const T &x);
-
-
-/*! In \c thrust, the function \c thrust::device_new allocates memory for
- *  an object and then creates an object at that location by calling a
- *  constructor. Occasionally, however, it is useful to separate those two
- *  operations. If each iterator in the range <tt>[first, first+n)</tt> points
- *  to uninitialized memory, then \p uninitialized_fill creates copies of \c x
- *  in that range. That is, for each iterator \c i in the range <tt>[first, first+n)</tt>,
- *  \p uninitialized_fill creates a copy of \c x in the location pointed to \c i by
- *  calling \p ForwardIterator's \c value_type's copy constructor.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first The first element of the range of interest.
- *  \param n The size of the range of interest.
- *  \param x The value to use as the exemplar of the copy constructor.
- *  \return <tt>first+n</tt>
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator">Forward Iterator</a>,
- *          \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that
- *          takes a single argument of type \p T.
- *
- *  The following code snippet demonstrates how to use \p uninitialized_fill to initialize a range of
- *  uninitialized memory using the \p thrust::device execution policy for parallelization:
- *
- *  \code
- *  #include <thrust/uninitialized_fill.h>
- *  #include <thrust/device_malloc.h>
- *  #include <thrust/execution_policy.h>
- *
- *  struct Int
- *  {
- *    __host__ __device__
- *    Int(int x) : val(x) {}
- *    int val;
- *  };
- *  ...
- *  const int N = 137;
- *
- *  Int val(46);
- *  thrust::device_ptr<Int> array = thrust::device_malloc<Int>(N);
- *  thrust::uninitialized_fill_n(thrust::device, array, N, val);
- *
- *  // Int x = array[i];
- *  // x.val == 46 for all 0 <= i < N
- *  \endcode
- *
- *  \see http://www.sgi.com/tech/stl/uninitialized_fill.html
- *  \see \c uninitialized_fill
- *  \see \c fill
- *  \see \c uninitialized_copy_n
- *  \see \c device_new
- *  \see \c device_malloc
- */
-template<typename DerivedPolicy, typename ForwardIterator, typename Size, typename T>
-__host__ __device__
-  ForwardIterator uninitialized_fill_n(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                                       ForwardIterator first,
-                                       Size n,
-                                       const T &x);
-
-
-/*! In \c thrust, the function \c thrust::device_new allocates memory for
- *  an object and then creates an object at that location by calling a
- *  constructor. Occasionally, however, it is useful to separate those two
- *  operations. If each iterator in the range <tt>[first, first+n)</tt> points
- *  to uninitialized memory, then \p uninitialized_fill creates copies of \c x
- *  in that range. That is, for each iterator \c i in the range <tt>[first, first+n)</tt>,
- *  \p uninitialized_fill creates a copy of \c x in the location pointed to \c i by
- *  calling \p ForwardIterator's \c value_type's copy constructor.
- *
- *  \param first The first element of the range of interest.
- *  \param n The size of the range of interest.
- *  \param x The value to use as the exemplar of the copy constructor.
- *  \return <tt>first+n</tt>
- *
- *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator">Forward Iterator</a>,
- *          \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that
- *          takes a single argument of type \p T.
- *
- *  The following code snippet demonstrates how to use \p uninitialized_fill to initialize a range of
- *  uninitialized memory.
- *
- *  \code
- *  #include <thrust/uninitialized_fill.h>
- *  #include <thrust/device_malloc.h>
- *
- *  struct Int
- *  {
- *    __host__ __device__
- *    Int(int x) : val(x) {}
- *    int val;
- *  };
- *  ...
- *  const int N = 137;
- *
- *  Int val(46);
- *  thrust::device_ptr<Int> array = thrust::device_malloc<Int>(N);
- *  thrust::uninitialized_fill_n(array, N, val);
- *
- *  // Int x = array[i];
- *  // x.val == 46 for all 0 <= i < N
- *  \endcode
- *
- *  \see http://www.sgi.com/tech/stl/uninitialized_fill.html
- *  \see \c uninitialized_fill
- *  \see \c fill
- *  \see \c uninitialized_copy_n
- *  \see \c device_new
- *  \see \c device_malloc
- */
-template<typename ForwardIterator, typename Size, typename T>
-  ForwardIterator uninitialized_fill_n(ForwardIterator first,
-                                       Size n,
-                                       const T &x);
-
-/*! \} // end filling
- *  \} // transformations
- */
-
-} // end thrust
-
-#include <thrust/detail/uninitialized_fill.inl>
-
 
spaces/CVPR/WALT/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py DELETED
@@ -1,55 +0,0 @@
-import numpy as np
-import torch
-
-from ..builder import BBOX_SAMPLERS
-from .random_sampler import RandomSampler
-
-
-@BBOX_SAMPLERS.register_module()
-class InstanceBalancedPosSampler(RandomSampler):
-    """Instance balanced sampler that samples equal number of positive samples
-    for each instance."""
-
-    def _sample_pos(self, assign_result, num_expected, **kwargs):
-        """Sample positive boxes.
-
-        Args:
-            assign_result (:obj:`AssignResult`): The assigned results of boxes.
-            num_expected (int): The number of expected positive samples
-
-        Returns:
-            Tensor or ndarray: sampled indices.
-        """
-        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
-        if pos_inds.numel() != 0:
-            pos_inds = pos_inds.squeeze(1)
-        if pos_inds.numel() <= num_expected:
-            return pos_inds
-        else:
-            unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
-            num_gts = len(unique_gt_inds)
-            num_per_gt = int(round(num_expected / float(num_gts)) + 1)
-            sampled_inds = []
-            for i in unique_gt_inds:
-                inds = torch.nonzero(
-                    assign_result.gt_inds == i.item(), as_tuple=False)
-                if inds.numel() != 0:
-                    inds = inds.squeeze(1)
-                else:
-                    continue
-                if len(inds) > num_per_gt:
-                    inds = self.random_choice(inds, num_per_gt)
-                sampled_inds.append(inds)
-            sampled_inds = torch.cat(sampled_inds)
-            if len(sampled_inds) < num_expected:
-                num_extra = num_expected - len(sampled_inds)
-                extra_inds = np.array(
-                    list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
-                if len(extra_inds) > num_extra:
-                    extra_inds = self.random_choice(extra_inds, num_extra)
-                extra_inds = torch.from_numpy(extra_inds).to(
-                    assign_result.gt_inds.device).long()
-                sampled_inds = torch.cat([sampled_inds, extra_inds])
-            elif len(sampled_inds) > num_expected:
-                sampled_inds = self.random_choice(sampled_inds, num_expected)
-            return sampled_inds
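
The sampler above first gives every ground-truth instance an equal quota of roughly num_expected / num_gts positive proposals, then tops up or trims the pool to hit num_expected exactly. A small standalone sketch of that quota step, using plain NumPy and made-up assignment data (not values from the deleted file), is:

import numpy as np

rng = np.random.default_rng(0)
num_expected = 6
# Hypothetical ground-truth index per proposal (0 means not assigned to any instance).
gt_inds = np.array([1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3])

pos = np.flatnonzero(gt_inds > 0)
gts = np.unique(gt_inds[pos])
num_per_gt = int(round(num_expected / float(len(gts))) + 1)  # same quota formula as above

sampled = []
for g in gts:
    inds = np.flatnonzero(gt_inds == g)
    if len(inds) > num_per_gt:
        inds = rng.choice(inds, num_per_gt, replace=False)  # cap each instance at its quota
    sampled.append(inds)
sampled = np.concatenate(sampled)
# Trim (or, in the full sampler, top up from the leftovers) to exactly num_expected indices.
if len(sampled) > num_expected:
    sampled = rng.choice(sampled, num_expected, replace=False)
print(sorted(sampled))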
 
spaces/CVPR/WALT/train.py DELETED
@@ -1,191 +0,0 @@
-import argparse
-import copy
-import os
-import os.path as osp
-import time
-import warnings
-
-import mmcv
-import torch
-from mmcv import Config, DictAction
-from mmcv.runner import get_dist_info, init_dist
-from mmcv.utils import get_git_hash
-
-from mmdet import __version__
-from mmdet.apis import set_random_seed
-from mmdet.models import build_detector
-from mmdet.utils import collect_env, get_root_logger
-from walt.apis import train_detector
-from walt.datasets import build_dataset
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(description='Train a detector')
-    parser.add_argument('config', help='train config file path')
-    parser.add_argument('--work-dir', help='the dir to save logs and models')
-    parser.add_argument(
-        '--resume-from', help='the checkpoint file to resume from')
-    parser.add_argument(
-        '--no-validate',
-        action='store_true',
-        help='whether not to evaluate the checkpoint during training')
-    group_gpus = parser.add_mutually_exclusive_group()
-    group_gpus.add_argument(
-        '--gpus',
-        type=int,
-        help='number of gpus to use '
-        '(only applicable to non-distributed training)')
-    group_gpus.add_argument(
-        '--gpu-ids',
-        type=int,
-        nargs='+',
-        help='ids of gpus to use '
-        '(only applicable to non-distributed training)')
-    parser.add_argument('--seed', type=int, default=None, help='random seed')
-    parser.add_argument(
-        '--deterministic',
-        action='store_true',
-        help='whether to set deterministic options for CUDNN backend.')
-    parser.add_argument(
-        '--options',
-        nargs='+',
-        action=DictAction,
-        help='override some settings in the used config, the key-value pair '
-        'in xxx=yyy format will be merged into config file (deprecate), '
-        'change to --cfg-options instead.')
-    parser.add_argument(
-        '--cfg-options',
-        nargs='+',
-        action=DictAction,
-        help='override some settings in the used config, the key-value pair '
-        'in xxx=yyy format will be merged into config file. If the value to '
-        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
-        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
-        'Note that the quotation marks are necessary and that no white space '
-        'is allowed.')
-    parser.add_argument(
-        '--launcher',
-        choices=['none', 'pytorch', 'slurm', 'mpi'],
-        default='none',
-        help='job launcher')
-    parser.add_argument('--local_rank', type=int, default=0)
-    args = parser.parse_args()
-    if 'LOCAL_RANK' not in os.environ:
-        os.environ['LOCAL_RANK'] = str(args.local_rank)
-
-    if args.options and args.cfg_options:
-        raise ValueError(
-            '--options and --cfg-options cannot be both '
-            'specified, --options is deprecated in favor of --cfg-options')
-    if args.options:
-        warnings.warn('--options is deprecated in favor of --cfg-options')
-        args.cfg_options = args.options
-
-    return args
-
-
-def main():
-    args = parse_args()
-
-    cfg = Config.fromfile(args.config)
-    if args.cfg_options is not None:
-        cfg.merge_from_dict(args.cfg_options)
-    # import modules from string list.
-    if cfg.get('custom_imports', None):
-        from mmcv.utils import import_modules_from_strings
-        import_modules_from_strings(**cfg['custom_imports'])
-    # set cudnn_benchmark
-    if cfg.get('cudnn_benchmark', False):
-        torch.backends.cudnn.benchmark = True
-
-    # work_dir is determined in this priority: CLI > segment in file > filename
-    if args.work_dir is not None:
-        # update configs according to CLI args if args.work_dir is not None
-        cfg.work_dir = args.work_dir
-    elif cfg.get('work_dir', None) is None:
-        # use config filename as default work_dir if cfg.work_dir is None
-        cfg.work_dir = osp.join('./work_dirs',
-                                osp.splitext(osp.basename(args.config))[0])
-
-    if args.resume_from is not None:
-        cfg.resume_from = args.resume_from
-    if args.gpu_ids is not None:
-        cfg.gpu_ids = args.gpu_ids
-    else:
-        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
-
-    # init distributed env first, since logger depends on the dist info.
-    if args.launcher == 'none':
-        distributed = False
-    else:
-        distributed = True
-        init_dist(args.launcher, **cfg.dist_params)
-        # re-set gpu_ids with distributed training mode
-        _, world_size = get_dist_info()
-        cfg.gpu_ids = range(world_size)
-
-    # create work_dir
-    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
-    # dump config
-    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
-    # init the logger before other steps
-    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
-    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
-    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
-
-    # init the meta dict to record some important information such as
-    # environment info and seed, which will be logged
-    meta = dict()
-    # log env info
-    env_info_dict = collect_env()
-    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
-    dash_line = '-' * 60 + '\n'
-    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
-                dash_line)
-    meta['env_info'] = env_info
-    meta['config'] = cfg.pretty_text
-    # log some basic info
-    logger.info(f'Distributed training: {distributed}')
-    logger.info(f'Config:\n{cfg.pretty_text}')
-
-    # set random seeds
-    if args.seed is not None:
-        logger.info(f'Set random seed to {args.seed}, '
-                    f'deterministic: {args.deterministic}')
-        set_random_seed(args.seed, deterministic=args.deterministic)
-    cfg.seed = args.seed
-    meta['seed'] = args.seed
-    meta['exp_name'] = osp.basename(args.config)
-
-    model = build_detector(
-        cfg.model,
-        train_cfg=cfg.get('train_cfg'),
-        test_cfg=cfg.get('test_cfg'))
-
-    datasets = [build_dataset(cfg.data.train)]
-    if len(cfg.workflow) == 2:
-        val_dataset = copy.deepcopy(cfg.data.val)
-        val_dataset.pipeline = cfg.data.train.pipeline
-        datasets.append(build_dataset(val_dataset))
-    if cfg.checkpoint_config is not None:
-        # save mmdet version, config file content and class names in
-        # checkpoints as meta data
-        cfg.checkpoint_config.meta = dict(
-            mmdet_version=__version__ + get_git_hash()[:7],
-            CLASSES=datasets[0].CLASSES)
-
-    # add an attribute for visualization convenience
-    model.CLASSES = datasets[0].CLASSES
-    train_detector(
-        model,
-        datasets,
-        cfg,
-        distributed=distributed,
-        validate=(not args.no_validate),
-        timestamp=timestamp,
-        meta=meta)
-
-
-if __name__ == '__main__':
-    main()
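
One detail worth noting in main() above is how the work directory is resolved: an explicit --work-dir flag wins, then a work_dir key in the config, and otherwise the config filename is reused under ./work_dirs/. A minimal standalone sketch of that fallback follows; the config path is a made-up example, not one taken from the deleted file.

import os.path as osp

def resolve_work_dir(cli_work_dir, cfg_work_dir, config_path):
    # Priority: CLI flag > work_dir in the config > ./work_dirs/<config name without extension>.
    if cli_work_dir is not None:
        return cli_work_dir
    if cfg_work_dir is not None:
        return cfg_work_dir
    return osp.join('./work_dirs', osp.splitext(osp.basename(config_path))[0])

print(resolve_work_dir(None, None, 'configs/example_detector.py'))
# ./work_dirs/example_detector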