parquet-converter committed
Commit 3bf624d · 1 Parent(s): 4335806

Update parquet files (step 70 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/Provider/Providers/Bing.py +0 -356
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/A comparison of plaxis 3d v21 and other geotechnical software.md +0 -17
  3. spaces/1gistliPinn/ChatGPT4/Examples/Aerofly Rc 7 Cracked Pepper -.md +0 -114
  4. spaces/1gistliPinn/ChatGPT4/Examples/Aleo Swf To Gif Converter Full 12 PORTABLE.md +0 -28
  5. spaces/1gistliPinn/ChatGPT4/Examples/Communication Engineering By Js Chitode 62.pdf.md +0 -6
  6. spaces/1line/AutoGPT/autogpt/utils.py +0 -77
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Attack on Titan Fangame by Riva - Offline Mode APK Download.md +0 -134
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Edge 80 The Browser That Gives You More Control and Privacy.md +0 -217
  9. spaces/1phancelerku/anime-remove-background/ 2023 PDF .md +0 -175
  10. spaces/1phancelerku/anime-remove-background/7 Easy Ways to Improve Your Download Speed Right Now.md +0 -97
  11. spaces/1phancelerku/anime-remove-background/Download YouTube Premium APK Mod and Access Exclusive Content Background Play and More.md +0 -107
  12. spaces/2ndelement/voicevox/test/test_synthesis_engine_base.py +0 -411
  13. spaces/4Taps/SadTalker/src/audio2pose_models/audio2pose.py +0 -94
  14. spaces/AIWaves/Debate/src/agents/Environment/__init__.py +0 -1
  15. spaces/Ababababababbababa/poetry2023/README.md +0 -13
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/arcadetcrp.js +0 -11
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/mousewheeltoupdown.d.ts +0 -2
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/Build.js +0 -40
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/Slider.js +0 -194
  20. spaces/AlekseyKorshuk/instagram-filter-removal/README.md +0 -37
  21. spaces/AlexWang/lama/bin/extract_masks.py +0 -63
  22. spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/cleaners.py +0 -146
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/controlnet/train_controlnet.py +0 -1127
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_controlnet_to_diffusers.py +0 -109
  25. spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py +0 -2
  26. spaces/Andy1621/uniformer_image_detection/mmdet/core/evaluation/bbox_overlaps.py +0 -48
  27. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/__init__.py +0 -13
  28. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes.py +0 -9
  29. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/__init__.py +0 -47
  30. spaces/AnthonyTruchetPoC/persistent-docker/doc/conf.py +0 -52
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/main.py +0 -12
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/pkg_resources.py +0 -270
  33. spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolov3.py +0 -33
  34. spaces/Audio-AGI/WavJourney/VoiceParser/pre_kmeans_hubert.py +0 -106
  35. spaces/Bart92/RVC_HF/diffq/base.py +0 -262
  36. spaces/Benson/text-generation/Examples/Barikad Crew Album Goumen Pou Saw Kwe Mp3 Download.md +0 -29
  37. spaces/Benson/text-generation/Examples/Cmo Descargar Sims En Sims 3.md +0 -154
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/_structures.py +0 -61
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/egg_info.py +0 -763
  40. spaces/CVH-vn1210/make_hair/minigpt4/models/blip2.py +0 -221
  41. spaces/CVH-vn1210/make_hair/minigpt4/models/blip2_outputs.py +0 -110
  42. spaces/CVPR/LIVE/thrust/thrust/functional.h +0 -1719
  43. spaces/CVPR/WALT/mmdet/models/losses/ae_loss.py +0 -102
  44. spaces/ChihChiu29/mychatbot/README.md +0 -11
  45. spaces/ChrisPreston/diff-svc_minato_aqua/modules/nsf_hifigan/utils.py +0 -69
  46. spaces/DGSpitzer/DGS-Diffusion-Space/share_btn.py +0 -72
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_core/_synchronization.py +0 -596
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/tz/_factories.py +0 -80
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/feaLib/error.py +0 -22
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-ecdf43f2.js +0 -2
spaces/101-5/gpt4free/g4f/Provider/Providers/Bing.py DELETED
@@ -1,356 +0,0 @@
- import os
- import json
- import random
- import json
- import os
- import uuid
- import ssl
- import certifi
- import aiohttp
- import asyncio
-
- import requests
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://bing.com/chat'
- model = ['gpt-4']
- supports_stream = True
- needs_auth = False
-
- ssl_context = ssl.create_default_context()
- ssl_context.load_verify_locations(certifi.where())
-
-
- class optionsSets:
-     optionSet: dict = {
-         'tone': str,
-         'optionsSets': list
-     }
-
-     jailbreak: dict = {
-         "optionsSets": [
-             'saharasugg',
-             'enablenewsfc',
-             'clgalileo',
-             'gencontentv3',
-             "nlu_direct_response_filter",
-             "deepleo",
-             "disable_emoji_spoken_text",
-             "responsible_ai_policy_235",
-             "enablemm",
-             "h3precise"
-             # "harmonyv3",
-             "dtappid",
-             "cricinfo",
-             "cricinfov2",
-             "dv3sugg",
-             "nojbfedge"
-         ]
-     }
-
-
- class Defaults:
-     delimiter = '\x1e'
-     ip_address = f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
-
-     allowedMessageTypes = [
-         'Chat',
-         'Disengaged',
-         'AdsQuery',
-         'SemanticSerp',
-         'GenerateContentQuery',
-         'SearchQuery',
-         'ActionRequest',
-         'Context',
-         'Progress',
-         'AdsQuery',
-         'SemanticSerp'
-     ]
-
-     sliceIds = [
-
-         # "222dtappid",
-         # "225cricinfo",
-         # "224locals0"
-
-         'winmuid3tf',
-         'osbsdusgreccf',
-         'ttstmout',
-         'crchatrev',
-         'winlongmsgtf',
-         'ctrlworkpay',
-         'norespwtf',
-         'tempcacheread',
-         'temptacache',
-         '505scss0',
-         '508jbcars0',
-         '515enbotdets0',
-         '5082tsports',
-         '515vaoprvs',
-         '424dagslnv1s0',
-         'kcimgattcf',
-         '427startpms0'
-     ]
-
-     location = {
-         'locale': 'en-US',
-         'market': 'en-US',
-         'region': 'US',
-         'locationHints': [
-             {
-                 'country': 'United States',
-                 'state': 'California',
-                 'city': 'Los Angeles',
-                 'timezoneoffset': 8,
-                 'countryConfidence': 8,
-                 'Center': {
-                     'Latitude': 34.0536909,
-                     'Longitude': -118.242766
-                 },
-                 'RegionType': 2,
-                 'SourceType': 1
-             }
-         ],
-     }
-
-
- def _format(msg: dict) -> str:
-     return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
-
-
- async def create_conversation():
-     for _ in range(5):
-         create = requests.get('https://www.bing.com/turing/conversation/create',
-             headers={
-                 'cookie': '_U=1',
-                 'accept': 'application/json',
-                 'accept-language': 'zh-CN,zh;q=0.9',
-                 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58',
-                 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/MacIntel',
-                 'authority': 'edgeservices.bing.com',
-                 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
-                 'accept-language': 'en-US,en;q=0.9',
-                 'cache-control': 'max-age=0',
-                 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-                 'sec-ch-ua-arch': '"x86"',
-                 'sec-ch-ua-bitness': '"64"',
-                 'sec-ch-ua-full-version': '"110.0.1587.69"',
-                 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
-                 'sec-ch-ua-mobile': '?0',
-                 'sec-ch-ua-model': '""',
-                 'sec-ch-ua-platform': '"Windows"',
-                 'sec-ch-ua-platform-version': '"15.0.0"',
-                 'sec-fetch-dest': 'document',
-                 'sec-fetch-mode': 'navigate',
-                 'sec-fetch-site': 'none',
-                 'sec-fetch-user': '?1',
-                 'upgrade-insecure-requests': '1',
-                 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
-                 'x-edge-shopping-flag': '1',
-                 'x-forwarded-for': Defaults.ip_address
-             })
-
-         conversationId = create.json().get('conversationId')
-         clientId = create.json().get('clientId')
-         conversationSignature = create.json().get('conversationSignature')
-
-         if not conversationId or not clientId or not conversationSignature and _ == 4:
-             raise Exception('Failed to create conversation.')
-
-         return conversationId, clientId, conversationSignature
-
-
- async def stream_generate(prompt: str, mode: optionsSets.optionSet = optionsSets.jailbreak, context: bool or str = False):
-     timeout = aiohttp.ClientTimeout(total=900)
-     session = aiohttp.ClientSession(timeout=timeout)
-
-     conversationId, clientId, conversationSignature = await create_conversation()
-
-     wss = await session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', ssl=ssl_context, autoping=False,
-         headers={
-             'cookie': '_U=1',
-             'accept': 'application/json',
-             'accept-language': 'en-US,en;q=0.9',
-             'content-type': 'application/json',
-             'sec-ch-ua': '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
-             'sec-ch-ua-arch': '"x86"',
-             'sec-ch-ua-bitness': '"64"',
-             'sec-ch-ua-full-version': '"109.0.1518.78"',
-             'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-model': '',
-             'sec-ch-ua-platform': '"Windows"',
-             'sec-ch-ua-platform-version': '"15.0.0"',
-             'sec-fetch-dest': 'empty',
-             'sec-fetch-mode': 'cors',
-             'sec-fetch-site': 'same-origin',
-             'x-ms-client-request-id': str(uuid.uuid4()),
-             'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
-             'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx',
-             'Referrer-Policy': 'origin-when-cross-origin',
-             'x-forwarded-for': Defaults.ip_address
-         })
-
-     await wss.send_str(_format({'protocol': 'json', 'version': 1}))
-     await wss.receive(timeout=900)
-
-     struct = {
-         'arguments': [
-             {
-                 **mode,
-                 'source': 'cib',
-                 'allowedMessageTypes': Defaults.allowedMessageTypes,
-                 'sliceIds': Defaults.sliceIds,
-                 'traceId': os.urandom(16).hex(),
-                 'isStartOfSession': True,
-                 'message': Defaults.location | {
-                     'author': 'user',
-                     'inputMethod': 'Keyboard',
-                     'text': prompt,
-                     'messageType': 'Chat'
-                 },
-                 'conversationSignature': conversationSignature,
-                 'participant': {
-                     'id': clientId
-                 },
-                 'conversationId': conversationId
-             }
-         ],
-         'invocationId': '0',
-         'target': 'chat',
-         'type': 4
-     }
-
-     if context:
-         struct['arguments'][0]['previousMessages'] = [
-             {
-                 "author": "user",
-                 "description": context,
-                 "contextType": "WebPage",
-                 "messageType": "Context",
-                 "messageId": "discover-web--page-ping-mriduna-----"
-             }
-         ]
-
-     await wss.send_str(_format(struct))
-
-     final = False
-     draw = False
-     resp_txt = ''
-     result_text = ''
-     resp_txt_no_link = ''
-     cache_text = ''
-
-     while not final:
-         msg = await wss.receive(timeout=900)
-         objects = msg.data.split(Defaults.delimiter)
-
-         for obj in objects:
-             if obj is None or not obj:
-                 continue
-
-             response = json.loads(obj)
-             if response.get('type') == 1 and response['arguments'][0].get('messages',):
-                 if not draw:
-                     if (response['arguments'][0]['messages'][0]['contentOrigin'] != 'Apology') and not draw:
-                         resp_txt = result_text + \
-                             response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
-                                 'text', '')
-                         resp_txt_no_link = result_text + \
-                             response['arguments'][0]['messages'][0].get(
-                                 'text', '')
-
-                         if response['arguments'][0]['messages'][0].get('messageType',):
-                             resp_txt = (
-                                 resp_txt
-                                 + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
-                                 + '\n'
-                             )
-                             result_text = (
-                                 result_text
-                                 + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
-                                 + '\n'
-                             )
-
-                 if cache_text.endswith(' '):
-                     final = True
-                     if wss and not wss.closed:
-                         await wss.close()
-                     if session and not session.closed:
-                         await session.close()
-
-                 yield (resp_txt.replace(cache_text, ''))
-                 cache_text = resp_txt
-
-             elif response.get('type') == 2:
-                 if response['item']['result'].get('error'):
-                     if wss and not wss.closed:
-                         await wss.close()
-                     if session and not session.closed:
-                         await session.close()
-
-                     raise Exception(
-                         f"{response['item']['result']['value']}: {response['item']['result']['message']}")
-
-                 if draw:
-                     cache = response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text']
-                     response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text'] = (
-                         cache + resp_txt)
-
-                 if (response['item']['messages'][-1]['contentOrigin'] == 'Apology' and resp_txt):
-                     response['item']['messages'][-1]['text'] = resp_txt_no_link
-                     response['item']['messages'][-1]['adaptiveCards'][0]['body'][0]['text'] = resp_txt
-
-                     # print('Preserved the message from being deleted', file=sys.stderr)
-
-                 final = True
-                 if wss and not wss.closed:
-                     await wss.close()
-                 if session and not session.closed:
-                     await session.close()
-
-
- def run(generator):
-     loop = asyncio.get_event_loop()
-     gen = generator.__aiter__()
-
-     while True:
-         try:
-             next_val = loop.run_until_complete(gen.__anext__())
-             yield next_val
-
-         except StopAsyncIteration:
-             break
-
-     #print('Done')
-
-
- def convert(messages):
-     context = ""
-
-     for message in messages:
-         context += "[%s](#message)\n%s\n\n" % (message['role'],
-             message['content'])
-
-     return context
-
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     if len(messages) < 2:
-         prompt = messages[0]['content']
-         context = False
-
-     else:
-         prompt = messages[-1]['content']
-         context = convert(messages[:-1])
-
-     response = run(stream_generate(prompt, optionsSets.jailbreak, context))
-     for token in response:
-         yield (token)
-
-     #print('Done')
-
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join(
-         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/1acneusushi/gradio-2dmoleculeeditor/data/A comparison of plaxis 3d v21 and other geotechnical software.md DELETED
@@ -1,17 +0,0 @@
1
-
2
- <h1>How to use plaxis 3d v21 for geotechnical engineering</h1>
3
- <p>Plaxis 3d v21 is a powerful software for geotechnical engineering that allows you to model complex soil and rock behavior, groundwater flow, and structural interaction in three dimensions. With plaxis 3d v21, you can perform advanced analyses such as consolidation, dynamic loading, seismic response, slope stability, excavation, tunneling, foundation design, and more.</p>
4
- <p>In this article, we will show you how to use plaxis 3d v21 for some common geotechnical engineering applications. We will assume that you have already installed plaxis 3d v21 on your computer and have a basic familiarity with its user interface. If not, you can download a free trial version from the official website and follow the tutorials provided there.</p>
5
- <h2>plaxis 3d v21 crack</h2><br /><p><b><b>DOWNLOAD</b> &mdash; <a href="https://byltly.com/2uKzgR">https://byltly.com/2uKzgR</a></b></p><br /><br />
6
- <h2>Creating a project</h2>
7
- <p>The first step to use plaxis 3d v21 is to create a new project. To do this, open plaxis 3d v21 and click on the "New" button on the toolbar. You will be prompted to enter a project name, a description, and a folder location. You can also choose a template from the list of predefined projects that suit your needs. For example, if you want to model an excavation in clay, you can select the "Excavation in clay" template.</p>
8
- <p>After creating a new project, you will see four main windows: the Model Explorer, the Command Line, the Output Window, and the View Window. The Model Explorer shows the hierarchy of objects in your project, such as materials, boundaries, loads, meshes, etc. The Command Line allows you to enter commands and parameters for your project. The Output Window displays messages and warnings from the software. The View Window shows the graphical representation of your model and results.</p>
9
- <h2>Defining materials</h2>
10
- <p>The next step is to define the materials that you want to use in your model. Plaxis 3d v21 has a library of predefined materials that cover various types of soils and rocks. You can access the library by clicking on the "Materials" button on the toolbar. You can also create your own custom materials by clicking on the "New" button in the Materials window.</p>
11
- <p>To define a material, you need to specify its name, type, color, and properties. The type of material determines the constitutive model that will be used to describe its behavior. Plaxis 3d v21 supports several constitutive models for soils and rocks, such as linear elastic, Mohr-Coulomb, Hardening Soil, Soft Soil Creep, Jointed Rock, etc. The properties of a material include parameters such as density, Young's modulus, Poisson's ratio, cohesion, friction angle, dilatancy angle, permeability, etc. You can enter these parameters manually or use the "Estimate" button to calculate them from empirical correlations.</p>
12
- <h2>Creating geometry</h2>
13
- <p>After defining the materials, you need to create the geometry of your model. Plaxis 3d v21 allows you to create geometry using two methods: extrusion and volume generation. Extrusion is a method of creating geometry by extruding a two-dimensional cross-section along a path. Volume generation is a method of creating geometry by combining or subtracting predefined shapes such as boxes, cylinders, spheres, etc.</p>
14
- <p>To create geometry using extrusion, you need to first draw a cross-section using points and lines in the Geometry mode. You can access the Geometry mode by clicking on the "Geometry" button on the toolbar. You can also import a cross-section from an external file such as DXF or CSV. After drawing or importing a cross-section, you need to assign materials to each region using the "Assign Material" tool. Then you need to select a path along which you want to extrude your cross-section using the "Select Path" tool. Finally</p>
15
- <p></p> ddb901b051<br />
16
- <br />
17
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Aerofly Rc 7 Cracked Pepper -.md DELETED
@@ -1,114 +0,0 @@
1
- <br />
2
- <h1>Aerofly Rc 7 Cracked Pepper - The Ultimate R/C Flight Simulator</h1>
3
- <p>If you are looking for a realistic and immersive way to fly radio controlled models, you should check out Aerofly Rc 7 Cracked Pepper. This is a cracked version of the popular R/C flight simulator Aerofly RC 7, which offers you a stunning level of realism and physics simulation. You can choose from over 200 models and 50 sceneries, and fly with friends all over the world in multiplayer mode. You can also customize your models and edit the sceneries with the included tools.</p>
4
- <h2>Aerofly Rc 7 Cracked Pepper -</h2><br /><p><b><b>Download File</b> &#127383; <a href="https://imgfil.com/2uxWYB">https://imgfil.com/2uxWYB</a></b></p><br /><br />
5
- <h2>Why Aerofly Rc 7 Cracked Pepper?</h2>
6
- <p>Aerofly Rc 7 Cracked Pepper is not just a game, it is a learning tool that will help you improve your R/C flying skills. Whether you are a beginner or an expert, you will find something to challenge and entertain you in this simulator. You can learn the basics of flying with the help of tutorials and trainers, or test your abilities in various competitions and game modes. You can also adjust the wind and time of day settings to create different flying conditions.</p>
7
- <p>Aerofly Rc 7 Cracked Pepper is also a great way to experience the thrill of flying without spending a lot of money on real models and equipment. You can fly any model you want, from quadcopters to jets, from gliders to helicopters, without worrying about crashing or damaging them. You can also explore different sceneries, from urban landscapes to alpine mountains, from tropical islands to desert canyons.</p>
8
- <h2>How to Download and Install Aerofly Rc 7 Cracked Pepper?</h2>
9
- <p>Downloading and installing Aerofly Rc 7 Cracked Pepper is very easy and fast. You just need to follow these simple steps:</p>
10
- <ol>
11
- <li>Go to the website <a href="https://cracked-gamespc.com/games/aerofly-rc-7-ultimate-edition">https://cracked-gamespc.com/games/aerofly-rc-7-ultimate-edition</a> and choose one of the download servers.</li>
12
- <li>Download the torrent file or the direct link of the game.</li>
13
- <li>Open the downloaded file with your preferred torrent client or unzip it with WinRAR.</li>
14
- <li>Burn or mount the image file with Daemon Tools or similar software.</li>
15
- <li>Install the game by following the instructions on the screen.</li>
16
- <li>Copy over the cracked content from the /Crack directory on the image to your game install directory.</li>
17
- <li>Play the game and enjoy!</li>
18
- </ol>
19
- <h2>Conclusion</h2>
20
- <p>Aerofly Rc 7 Cracked Pepper is a must-have for any R/C enthusiast who wants to experience the most realistic and fun R/C flight simulator ever. With its amazing graphics, physics, models, sceneries, and features, it will keep you hooked for hours. You can download it for free from our website and start flying right away. Don't miss this opportunity to fly like a pro with Aerofly Rc 7 Cracked Pepper!</p>
21
- <p></p>
22
- <h2>What are the Reviews of Aerofly Rc 7 Cracked Pepper?</h2>
23
- <p>Aerofly Rc 7 Cracked Pepper has received many positive reviews from users and critics alike. Here are some of the testimonials from satisfied customers:</p>
24
- <blockquote>
25
- <p>"I have been flying R/C models for over 20 years and I have to say that Aerofly Rc 7 Cracked Pepper is the best simulator I have ever used. The graphics are amazing, the physics are realistic, and the models are diverse and detailed. I can fly any model I want in any scenery I want without spending a fortune on them. It is also a great way to practice and improve my skills. I highly recommend it to anyone who loves R/C flying."</p>
26
- <cite>- John, USA</cite>
27
- </blockquote>
28
- <blockquote>
29
- <p>"Aerofly Rc 7 Cracked Pepper is a fantastic game that offers a lot of fun and challenge. I enjoy flying different models and sceneries, and competing with other players online. The game modes are very entertaining and addictive. The multiplayer mode is especially awesome, as I can fly with my friends and chat with them. The game is also very easy to install and run, thanks to the cracked version. It is definitely worth downloading."</p>
30
- <cite>- Anna, Germany</cite>
31
- </blockquote>
32
- <blockquote>
33
- <p>"I am a beginner in R/C flying and I was looking for a simulator that would help me learn the basics. I found Aerofly Rc 7 Cracked Pepper and I was amazed by how realistic and user-friendly it is. The tutorials and trainers are very helpful and informative. The flight modes are adjustable to suit my level and preference. The models and sceneries are stunning and varied. I have learned a lot from this simulator and I have also had a lot of fun."</p>
34
- <cite>- Lee, China</cite>
35
- </blockquote>
36
-
37
- <h2>What are the Tips for Playing Aerofly Rc 7 Cracked Pepper?</h2>
38
- <p>If you want to get the most out of Aerofly Rc 7 Cracked Pepper, here are some tips to help you play better:</p>
39
- <ul>
40
- <li>Use a joystick, gamepad or dedicated R/C controller with 4 axes for the best flying experience. Some models (especially helicopters) can't be flown properly with a joystick with only 3 axes or less.</li>
41
- <li>Read the manual and watch the videos on the website to learn more about the features and functions of the simulator.</li>
42
- <li>Start with simple models and sceneries, and gradually move on to more complex ones as you gain confidence and skill.</li>
43
- <li>Try different flight modes and game modes to challenge yourself and have fun.</li>
44
- <li>Join the online community and fly with other players around the world. You can also share your models and sceneries with them.</li>
45
- <li>Keep your game updated to the latest version to enjoy new models, sceneries, features, and bug fixes.</li>
46
- </ul>
47
-
48
- <h2>Where to Download Aerofly Rc 7 Cracked Pepper?</h2>
49
- <p>If you are ready to download Aerofly Rc 7 Cracked Pepper, you can do so from our website. We offer you a fast, safe, and easy way to get this amazing simulator for free. You don't need to register or pay anything to download it. You just need to follow these simple steps:</p>
50
- <ol>
51
- <li>Click on the download button below.</li>
52
- <li>Choose one of the download servers that suits you best.</li>
53
- <li>Download the torrent file or the direct link of the game.</li>
54
- <li>Open the downloaded file with your preferred torrent client or unzip it with WinRAR.</li>
55
- <li>Burn or mount the image file with Daemon Tools or similar software.</li>
56
- <li>Install the game by following the instructions on the screen.</li>
57
- <li>Copy over the cracked content from the /Crack directory on the image to your game install directory.</li>
58
- <li>Play the game and enjoy!</li>
59
- </ol>
60
- <h2>What are the Risks of Aerofly Rc 7 Cracked Pepper?</h2>
61
- <p>While Aerofly Rc 7 Cracked Pepper may seem like a great deal, it also comes with some risks that you should be aware of. Some of these risks are:</p>
62
- <ul>
63
- <li>You may be violating the intellectual property rights of the original developers and publishers of Aerofly RC 7. This may expose you to legal actions and penalties.</li>
64
- <li>You may be downloading and installing malware or viruses along with the cracked game. This may harm your computer and compromise your personal data.</li>
65
- <li>You may be unable to access some features and updates of the game. This may affect your gameplay experience and performance.</li>
66
- <li>You may be unable to get technical support or customer service from the official sources. This may leave you with unresolved issues and problems.</li>
67
- <li>You may be missing out on the benefits of supporting the developers and publishers of Aerofly RC 7. This may discourage them from creating more quality games in the future.</li>
68
- </ul>
69
-
70
- <h2>What are the Alternatives to Aerofly Rc 7 Cracked Pepper?</h2>
71
- <p>If you want to enjoy Aerofly RC 7 without risking any of the above-mentioned risks, you have some alternatives to choose from. Some of these alternatives are:</p>
72
- <ul>
73
- <li>You can buy the original game from the official website or from other authorized platforms. This will give you access to all the features and updates of the game, as well as technical support and customer service. You will also support the developers and publishers of Aerofly RC 7 and encourage them to make more games.</li>
74
- <li>You can try the free demo version of Aerofly RC 7 from the official website. This will give you a taste of the game and its features, without costing you anything. You can then decide if you want to buy the full version or not.</li>
75
- <li>You can look for other R/C flight simulators that are free or cheaper than Aerofly RC 7. There are many options available online, such as RealFlight, Phoenix RC, ClearView RC, etc. You can compare their features, graphics, models, sceneries, and prices, and choose the one that suits you best.</li>
76
- </ul>
77
-
78
- <h2>Conclusion</h2>
79
- <p>Aerofly Rc 7 Cracked Pepper is a cracked version of Aerofly RC 7, a realistic and immersive R/C flight simulator that offers you over 200 models and over 50 sceneries to fly with. It is a fun and beneficial game that will help you learn and improve your R/C flying skills, as well as enjoy flying anytime and anywhere. However, it also comes with some risks that may harm your computer, your personal data, and your legal status. Therefore, you should consider the alternatives to Aerofly Rc 7 Cracked Pepper, such as buying the original game, trying the free demo version, or looking for other R/C flight simulators.</p>
80
- <h2>How to Fly with Aerofly Rc 7 Cracked Pepper?</h2>
81
- <p>Flying with Aerofly Rc 7 Cracked Pepper is easy and fun. You just need to follow these simple steps:</p>
82
- <ol>
83
- <li>Launch the game and select your model and scenery from the menus.</li>
84
- <li>Connect your joystick, gamepad or R/C controller to your computer and calibrate it if necessary.</li>
85
- <li>Press the spacebar to start the engine and use the throttle to increase or decrease the power.</li>
86
- <li>Use the elevator, aileron, rudder and flaps to control the pitch, roll, yaw and lift of your model.</li>
87
- <li>Use the camera controls to change the view angle and zoom level.</li>
88
- <li>Press the escape key to pause the game and access the options menu.</li>
89
- </ol>
90
-
91
- <h2>How to Improve Your Skills with Aerofly Rc 7 Cracked Pepper?</h2>
92
- <p>If you want to improve your skills with Aerofly Rc 7 Cracked Pepper, here are some tips to help you do so:</p>
93
- <ul>
94
- <li>Watch the videos on the website or on YouTube to learn more about the models, sceneries, features and functions of the simulator.</li>
95
- <li>Read the manual and the online help to get more information and tips about the simulator.</li>
96
- <li>Use the tutorials and trainers to learn the basics of flying and master different maneuvers and techniques.</li>
97
- <li>Use the flight recorder and replay function to analyze your flights and correct your mistakes.</li>
98
- <li>Use the wind and time of day settings to create different flying conditions and challenges.</li>
99
- <li>Use the competitions and game modes to test your skills and have fun.</li>
100
- </ul>
101
-
102
- <h2>How to Have Fun with Aerofly Rc 7 Cracked Pepper?</h2>
103
- <p>If you want to have fun with Aerofly Rc 7 Cracked Pepper, here are some suggestions to help you do so:</p>
104
- <ul>
105
- <li>Fly different models and sceneries and explore their features and details.</li>
106
- <li>Fly with friends online or on a local network and chat with them.</li>
107
- <li>Share your models and sceneries with other players and download theirs.</li>
108
- <li>Customize your models and create your own sceneries with the model and scenery editor.</li>
109
- <li>Try different flight modes and game modes and see how they affect your flying experience.</li>
110
- </ul>
111
- <h2>Conclusion</h2>
112
- <p>Aerofly Rc 7 Cracked Pepper is a cracked version of Aerofly RC 7, a realistic and immersive R/C flight simulator that offers you over 200 models and over 50 sceneries to fly with. It is a fun and beneficial game that will help you learn and improve your R/C flying skills, as well as enjoy flying anytime and anywhere. However, it also comes with some risks that may harm your computer, your personal data, and your legal status. Therefore, you should consider the alternatives to Aerofly Rc 7 Cracked Pepper, such as buying the original game, trying the free demo version, or looking for other R/C flight simulators. Whether you choose to download Aerofly Rc 7 Cracked Pepper or not, we hope you have a great time flying with this amazing simulator.</p> 3cee63e6c2<br />
113
- <br />
114
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Aleo Swf To Gif Converter Full 12 PORTABLE.md DELETED
@@ -1,28 +0,0 @@
1
- <h2>Aleo Swf To Gif Converter Full 12</h2><br /><p><b><b>DOWNLOAD</b> &#10031; <a href="https://imgfil.com/2uxXni">https://imgfil.com/2uxXni</a></b></p><br /><br />
2
- <br />
3
- gif * GIF to SWF Converter * Convert GIF to Flash SWF * Converts SWF, SWF to Video, Video to SWF * Any Video to GIF Conversion * GIF to.GIF * Convert GIF files to JPG, BMP, PNG * Converts GIF to JPG, BMP, PNG * Transforms GIF into JPG, BMP, PNG, JPEG * Convert GIF to GIF image * Image to GIF Converter * Generates animated GIF from Video, Photo, Image, PNG, JPG, BMP * Any Image to Animated GIF * Generates Animated GIF from Photo, Image, Video, PDF, PNG, JPG, BMP, GIF * You can also convert multiple SWF and GIF files into animated GIF file.
4
-
5
- Run as a portable application, the SWF to GIF Converter supports all Windows version including Vista, 7, 8, 8.1 and 10. For more details, please click the Download button below.
6
-
7
- [Home](./index.md) &gt; [puppeteer](./puppeteer.md) &gt; [Protocol](./puppeteer.protocol.md) &gt; [LegacySessionStorage](./puppeteer.protocol.legacysessionstorage.md) &gt; [FileSystemOptions](./puppeteer.protocol.legacysessionstorage.filesystemoptions.md)
8
-
9
- ## Protocol.LegacySessionStorage.FileSystemOptions type
10
-
11
- Signature:
12
-
13
- ```typescript
14
-
15
- export declare type FileSystemOptions =
16
-
17
- fileSystemOptions?: FileSystemOptions;
18
-
19
- contents?: string;
20
-
21
- ;
22
-
23
- ```
24
-
25
- Melt-extruded glass sheets are used to manufacture a variety of different glass products, including, but not limited to, windows, doors, shower and bath enclosures, and architectural glass. The quality of the appearance of a manufactured glass product is in large part dependent on the quality of the glass sheets that are used to manufacture that product. It is desirable for glass sheets that are to be used to manufacture glass products to have optical 4fefd39f24<br />
26
- <br />
27
- <br />
28
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Communication Engineering By Js Chitode 62.pdf.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Communication Engineering By Js Chitode 62.pdf</h2><br /><p><b><b>DOWNLOAD</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://imgfil.com/2uy24l">https://imgfil.com/2uy24l</a></b></p><br /><br />
2
- <br />
3
- Communication Engineering ChitodeCommunication Engineering By Js Chitode Pdf Free 62Download book Communication. Engineering by Dr. J S Chitode . 4d29de3e1b<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1line/AutoGPT/autogpt/utils.py DELETED
@@ -1,77 +0,0 @@
- import os
-
- import requests
- import yaml
- from colorama import Fore
- from git import Repo
-
-
- def clean_input(prompt: str = ""):
-     try:
-         return input(prompt)
-     except KeyboardInterrupt:
-         print("You interrupted Auto-GPT")
-         print("Quitting...")
-         exit(0)
-
-
- def validate_yaml_file(file: str):
-     try:
-         with open(file, encoding="utf-8") as fp:
-             yaml.load(fp.read(), Loader=yaml.FullLoader)
-     except FileNotFoundError:
-         return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found")
-     except yaml.YAMLError as e:
-         return (
-             False,
-             f"There was an issue while trying to read with your AI Settings file: {e}",
-         )
-
-     return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
-
-
- def readable_file_size(size, decimal_places=2):
-     """Converts the given size in bytes to a readable format.
-     Args:
-         size: Size in bytes
-         decimal_places (int): Number of decimal places to display
-     """
-     for unit in ["B", "KB", "MB", "GB", "TB"]:
-         if size < 1024.0:
-             break
-         size /= 1024.0
-     return f"{size:.{decimal_places}f} {unit}"
-
-
- def get_bulletin_from_web() -> str:
-     try:
-         response = requests.get(
-             "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
-         )
-         if response.status_code == 200:
-             return response.text
-     except:
-         return ""
-
-
- def get_current_git_branch() -> str:
-     try:
-         repo = Repo(search_parent_directories=True)
-         branch = repo.active_branch
-         return branch.name
-     except:
-         return ""
-
-
- def get_latest_bulletin() -> str:
-     exists = os.path.exists("CURRENT_BULLETIN.md")
-     current_bulletin = ""
-     if exists:
-         current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read()
-     new_bulletin = get_bulletin_from_web()
-     is_new_news = new_bulletin != current_bulletin
-
-     if new_bulletin and is_new_news:
-         open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
-         return f" {Fore.RED}::UPDATED:: {Fore.CYAN}{new_bulletin}{Fore.RESET}"
-     return current_bulletin
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Attack on Titan Fangame by Riva - Offline Mode APK Download.md DELETED
@@ -1,134 +0,0 @@
1
- <br />
2
- <h1>Attack on Titan by Riva APK Download: A Fan-Made Mobile Game Based on the Popular Anime Series</h1>
3
- <p>If you are a fan of Attack on Titan, the epic anime series about humanity's struggle against giant man-eating creatures, you might be interested in trying out a fan-made mobile game based on it. Attack on Titan by Riva is a 3D action game that lets you play as a member of the Survey Corps, an elite military force that fights against the titans using special gear and weapons. In this article, we will tell you what this game is, how to download and install it, why you should play it, and some tips and tricks for playing it.</p>
4
- <h2>What is Attack on Titan by Riva?</h2>
5
- <p>Attack on Titan by Riva is a fan-made mobile game developed by Riva Dev, a YouTube channel that creates games based on popular anime and manga series. The game is not affiliated with or endorsed by the official creators of Attack on Titan, Kodansha or Hajime Isayama. The game is inspired by the anime's story, characters, and art style, but it is not a direct adaptation of it. The game is also not available on the Google Play Store or any other official app store, so you will need to download it from a third-party source.</p>
6
- <h2>attack on titan by riva apk download</h2><br /><p><b><b>Download Zip</b> === <a href="https://urlin.us/2uSXai">https://urlin.us/2uSXai</a></b></p><br /><br />
7
- <h3>Features of the game</h3>
8
- <p>Attack on Titan by Riva has several features that make it an enjoyable and immersive game for fans of the anime. Some of these features are:</p>
9
- <ul>
10
- <li>A realistic 3D graphics engine that recreates the atmosphere and scenery of the anime.</li>
11
- <li>A dynamic movement and combat system that allows you to use your 3D Maneuver Gear to fly around and slash at titans.</li>
12
- <li>A variety of titans with different sizes, shapes, and behaviors that pose different threats and challenges.</li>
13
- <li>A character customization system that lets you choose your appearance, outfit, hairstyle, and accessories.</li>
14
- <li>An equipment upgrade system that lets you improve your blades, gas tanks, and other gear.</li>
15
- <li>A skill upgrade system that lets you unlock and enhance different abilities and techniques.</li>
16
- <li>A story mode that follows the main events and characters of the anime.</li>
17
- <li>A survival mode that tests your skills and endurance against waves of titans.</li>
18
- <li>A multiplayer mode that lets you team up with other players online to fight titans together.</li>
19
- </ul>
20
- <h3>How to download and install the game</h3>
21
- <p>Since Attack on Titan by Riva is not available on any official app store, you will need to download it from a third-party source. Here are the steps to do so:</p>
22
- <ol>
23
- <li>Go to [this link](^1^) or [this link](^2^) to access the YouTube videos where Riva Dev uploaded the game. These videos also show some gameplay footage and instructions for playing the game.</li>
24
- <li>In the description of the videos, you will find a link to download the APK file of the game. The APK file is a compressed file that contains all the data and files needed to run the game on your device. The file size is about 51 MB or 110 MB depending on the version.</li>
25
- <li>Click on the link to download the APK file to your device. You might need to enable unknown sources in your device settings to allow downloading files from third-party sources.</li>
26
- <li>Once the download is complete, locate the APK file in your device storage and tap on it to install it. You might need to grant some permissions to allow the installation process.</li> <li>After the installation is done, you can launch the game from your app drawer or home screen. You might need to allow some additional permissions to access your device features and internet connection.</li>
27
- </ol>
28
- <p>Congratulations, you have successfully downloaded and installed Attack on Titan by Riva on your device. Now you can enjoy playing this fan-made game based on your favorite anime series.</p>
29
- <h2>Why should you play Attack on Titan by Riva?</h2>
30
- <p>Attack on Titan by Riva is not just a simple fan-made game. It is a game that offers a lot of fun and excitement for fans of the anime and anyone who likes action games. Here are some reasons why you should play this game:</p>
31
- <h3>Enjoy the thrilling action of fighting titans</h3>
32
- <p>One of the main attractions of this game is the action-packed gameplay that lets you fight against the titans using your 3D Maneuver Gear. You can fly around the battlefield, dodge the titans' attacks, and slash at their weak spots. You can also use different skills and techniques to enhance your combat abilities. The game has a realistic physics engine that makes the movement and combat feel smooth and responsive. The game also has a gore system that makes the battles more intense and satisfying.</p>
33
- <h3>Experience the story and characters of the anime</h3>
34
- <p>Another reason to play this game is to experience the story and characters of the anime. The game has a story mode that follows the main events and characters of the anime, such as Eren, Mikasa, Armin, Levi, Erwin, and more. You can relive the epic moments and scenes from the anime, such as the fall of Wall Maria, the battle of Trost, the clash of titans, and more. You can also interact with the characters and learn more about their personalities and backgrounds.</p>
35
- <h3>Customize your own character and equipment</h3>
36
- <p>A third reason to play this game is to customize your own character and equipment. The game has a character customization system that lets you choose your appearance, outfit, hairstyle, and accessories. You can create your own unique character that suits your style and preferences. You can also customize your equipment, such as your blades, gas tanks, hooks, and other gear. You can upgrade your equipment to make them more powerful and efficient.</p>
37
- <h2>Tips and tricks for playing Attack on Titan by Riva</h2>
38
- <p>Attack on Titan by Riva is a game that requires skill and strategy to play well. Here are some tips and tricks that can help you improve your gameplay and have more fun:</p>
39
- <p>attack on titan mobile fan game by riva<br />
40
- attack on titan fangame by riva offline<br />
41
- attack on titan mobile fangame by riva apk<br />
42
- attack on titan mobile fangame by riva mod apk<br />
43
- attack on titan mobile fangame by riva v03.0<br />
44
- attack on titan mobile fangame by riva v03.0 mod apk<br />
45
- attack on titan mobile fangame by riva v03.0 by julhiecio.apk<br />
46
- attack on titan mobile fangame by riva v03.0 by julie cio.apk mod<br />
47
- attack on titan mobile fangame by riva v03.0 by julhiecio.apk mod<br />
48
- attack on titan mobile fangame by riva v03 0 by julie cio apk mod<br />
49
- attack on titan mobile fangame by riva mediafire link<br />
50
- attack on titan mobile fangame by riva youtube video<br />
51
- attack on titan tribute game by riva dev<br />
52
- attack on titan tribute game by riva dev apk<br />
53
- attack on titan tribute game by riva dev download<br />
54
- attack on titan tribute game by riva dev offline<br />
55
- attack on titan tribute game by riva dev mediafire link<br />
56
- attack on titan tribute game by riva dev youtube video<br />
57
- how to download attack on titan by riva apk<br />
58
- how to install attack on titan by riva apk<br />
59
- how to play attack on titan by riva apk<br />
60
- how to update attack on titan by riva apk<br />
61
- is attack on titan by riva apk safe<br />
62
- is attack on titan by riva apk legal<br />
63
- is attack on titan by riva apk free<br />
64
- best settings for attack on titan by riva apk<br />
65
- best characters for attack on titan by riva apk<br />
66
- best tips and tricks for attack on titan by riva apk<br />
67
- best review for attack on titan by riva apk<br />
68
- best alternative for attack on titan by riva apk<br />
69
- what is new in attack on titan by riva apk v03.0<br />
70
- what is new in attack on titan tribute game by riva dev v3.0 apk (110 mb)<br />
71
- what is the difference between attack on titan mobile fangame and tribute game by riva dev<br />
72
- what is the special code for attack on titan mobile fangame by riva v03.0<br />
73
- what is the code for aot fan game survival roark aot fan game s rank run download <br />
74
- where can I find more information about attack on titan by riva apk <br />
75
- where can I find more videos about attack on titan by riva apk <br />
76
- where can I find more games like attack on titan by riva apk <br />
77
- where can I find more fans of attack on titan by riva apk <br />
78
- where can I find more updates of attack on titan by riva apk</p>
79
- <h3>Master the movement and combat system</h3>
80
- <p>The movement and combat system of this game is one of its most important aspects. You need to master how to use your 3D Maneuver Gear to fly around and fight titans. You need to learn how to aim your hooks, control your speed and direction, balance your gas consumption, and avoid obstacles. You also need to learn how to attack titans effectively, such as targeting their weak spots, timing your slashes, using skills wisely, and avoiding their counterattacks.</p>
81
- <h3>Upgrade your skills and gear</h3>
82
- <p>The game has a skill upgrade system that lets you unlock and enhance different abilities and techniques. You can upgrade your skills using skill points that you earn by playing the game. Some of the skills you can upgrade are: </p>
83
- <ul>
84
- <li>Attack: Increases your blade damage.</li>
85
- <li>Speed: Increases your movement speed.</li>
86
- <li>Gas: Increases your gas capacity.</li>
87
- <li>Hook: Increases your hook range.</li>
88
- <li>Critical: Increases your critical hit chance.</li>
89
- <li>Dodge: Increases your dodge chance.</li>
90
- <li>Heal: Increases your healing rate.</li>
91
- <li>Burst: Increases your burst damage.</li>
92
- <li>Skill: Unlocks special skills such as spin slash, back slash, air slash, etc.</li>
93
- </ul>
94
- <p>The game also has an equipment upgrade system that lets you improve your blades, gas tanks, hooks, and other gear. You can upgrade your equipment using materials that you collect by playing the game or buying them from the shop. Some of the benefits of upgrading your equipment are:</p>
95
- <ul>
96
- <li>Blades: Increases your blade durability and sharpness.</li>
97
- <li>Gas Tanks: Increases your gas pressure and efficiency.</li>
98
- <li>Hooks: Increases your hook strength and durability.</li>
99
- <li>Gear: Increases your gear durability and performance.</li>
100
- </ul>
101
- <h3>Explore the different modes and challenges</h3>
102
- <p>The game has different modes and challenges that offer different gameplay experiences and rewards. Some of these modes and challenges are:</p>
103
- <ul>
104
- <li>Story Mode: Follows the main events and characters of the anime. You can unlock new chapters and scenes by completing the previous ones.</li>
105
- <li>Survival Mode: Tests your skills and endurance against waves of titans. You can earn coins and materials by killing titans and surviving as long as possible.</li>
106
- <li>Multiplayer Mode: Lets you team up with other players online to fight titans together. You can chat with your teammates, share items, and cooperate to defeat the enemies.</li>
107
- <li>Challenges: Gives you specific tasks and objectives to complete in a limited time or with limited resources. You can earn rewards and achievements by completing the challenges.</li>
108
- </ul>
109
- <h2>Conclusion</h2>
110
- <p>Attack on Titan by Riva is a fan-made mobile game that offers a lot of fun and excitement for fans of the anime and anyone who likes action games. You can enjoy the thrilling action of fighting titans, experience the story and characters of the anime, customize your own character and equipment, and explore the different modes and challenges. You can download and install the game from a third-party source by following the steps we provided. You can also use our tips and tricks to improve your gameplay and have more fun.</p>
111
- <h3>Summary of the main points</h3>
112
- <p>To summarize, here are the main points of this article:</p>
113
- <ul>
114
- <li>Attack on Titan by Riva is a fan-made mobile game based on the popular anime series.</li>
115
- <li>The game is not available on any official app store, so you will need to download it from a third-party source.</li>
116
- <li>The game has several features that make it an enjoyable and immersive game, such as realistic 3D graphics, dynamic movement and combat system, variety of titans, character customization, equipment upgrade, skill upgrade, story mode, survival mode, multiplayer mode, and challenges.</li>
117
- <li>The game requires skill and strategy to play well, so you can use our tips and tricks to master the movement and combat system, upgrade your skills and gear, and explore the different modes and challenges.</li>
118
- </ul>
119
- <h3>Call to action</h3>
120
- <p>If you are interested in playing Attack on Titan by Riva, don't hesitate to download it now and try it out. You will not regret it. This game will give you hours of entertainment and satisfaction. You will feel like you are part of the Survey Corps, fighting for humanity's survival against the titans. You will also get to relive the epic moments and scenes from the anime, as well as create your own unique character and story. So what are you waiting for? Download Attack on Titan by Riva now and join the fight!</p>
121
- <h2>FAQs</h2>
122
- <p>Here are some frequently asked questions about Attack on Titan by Riva:</p>
123
- <h4>Is Attack on Titan by Riva free?</h4>
124
- <p>Yes, Attack on Titan by Riva is free to download and play. However, the game may contain some ads or in-app purchases that can enhance your gameplay or support the developer.</p>
125
- <h4>Is Attack on Titan by Riva safe?</h4>
126
- <p>Yes, Attack on Titan by Riva is safe to download and play. However, since it is not available on any official app store, you will need to download it from a third-party source. Therefore, you should be careful about where you download it from and what permissions you grant it. You should also scan the APK file with an antivirus software before installing it.</p>
127
- <h4>Is Attack on Titan by Riva updated?</h4>
128
- <p>Yes, Attack on Titan by Riva is updated regularly by the developer. The developer posts updates on their YouTube channel, where they also upload gameplay videos and instructions for playing the game. You can subscribe to their channel to get notified of new updates and features.</p>
129
- <h4>How can I contact the developer of Attack on Titan by Riva?</h4>
130
- <p>You can contact the developer of Attack on Titan by Riva through their YouTube channel or their email address. Their YouTube channel is [Riva Dev] and their email address is [email protected]. You can send them feedback, suggestions, bug reports, or any other inquiries related to the game.</p>
131
- <h4>How can I support the developer of Attack on Titan by Riva?</h4>
132
- <p>You can support the developer of Attack on Titan by Riva by playing their game, sharing it with your friends, leaving positive reviews and ratings, subscribing to their YouTube channel, watching their ads or making in-app purchases, or donating to them via PayPal or Patreon. You can find their PayPal and Patreon links in their YouTube videos' descriptions.</p> 197e85843d<br />
133
- <br />
134
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Edge 80 The Browser That Gives You More Control and Privacy.md DELETED
@@ -1,217 +0,0 @@
1
- <br />
2
- <h1>Download Edge 80: How to Get the Latest Version of Microsoft's Browser</h1>
3
- <p>If you are looking for a fast, secure, and modern browser that offers a lot of features and benefits, you might want to download Edge 80. Edge 80 is the latest version of Microsoft's browser that is based on Chromium, the same technology behind Google Chrome. In this article, we will tell you what Edge 80 is, why you should download it, how to download and install it on your device, and how to customize and use it to suit your needs.</p>
4
- <h2>What is Edge 80 and why should you download it?</h2>
5
- <p>Edge 80 is the new version of Microsoft's browser that was released in February 2020. It is based on Chromium, which means it has the same core engine and some shared features with Google Chrome. However, Edge 80 also has many unique features that make it stand out from other browsers.</p>
6
- <h2>download edge 80</h2><br /><p><b><b>Download</b> &#8250; <a href="https://urlin.us/2uSSuL">https://urlin.us/2uSSuL</a></b></p><br /><br />
7
- <h3>Edge 80 is based on Chromium and offers faster, more secure, and more modern browsing experience than Internet Explorer and Microsoft Edge Legacy</h3>
8
- <p>One of the main reasons to download Edge 80 is that it offers a faster, more secure, and more modern browsing experience than Internet Explorer and Microsoft Edge Legacy. Internet Explorer is an outdated browser that is no longer supported by Microsoft. Microsoft Edge Legacy is the previous version of Microsoft's browser that was based on a different engine. Both browsers have compatibility issues with some websites and web apps. They also have performance issues and security vulnerabilities.</p>
9
- <p>Edge 80 is based on Chromium, which means it has better compatibility with websites and web apps. It also has better performance and security than Internet Explorer and Microsoft Edge Legacy. According to some tests by Avast, Edge 80 is faster than Chrome in rendering and responsiveness. It also consumes less memory than Chrome. According to some tests by NSS Labs, Edge 80 blocks more malware downloads and phishing attacks than Chrome. It also has Microsoft Defender SmartScreen, which warns you about unsafe websites and downloads.</p>
10
- <h3>Edge 80 has many features that make it stand out from other browsers</h3>
11
- <p>Another reason to download Edge 80 is that it has many features that make it stand out from other browsers. These features are designed to enhance your browsing experience in various aspects such as performance, security, productivity, gaming, AI-powered, and shopping. Here are some examples of these features:</p>
12
- <table>
13
- <tr>
14
- <th>Feature category</th>
15
- <th>Examples of features</th>
16
- </tr>
17
- <tr>
18
- <td>Performance</td>
19
- <td>Startup boost, Efficiency mode, Sleeping tabs, etc.</td>
20
- </tr>
21
- <tr>
22
- <td>Security</td>
23
- <td>Microsoft Defender SmartScreen, Password monitor, InPrivate browsing, etc.</td>
24
- </tr>
25
- <tr>
26
- <td>Productivity</td>
27
- <td>Collections, Vertical tabs, Sidebar, Webhint, etc.</td>
28
- </tr>
29
- <tr>
30
- <td>Gaming</td>
31
- <td>Clarity Boost, Gaming homepage, PC gaming performance, etc.</td>
32
- </tr>
33
- <tr>
34
- <td>AI-powered</td>
35
- <td>Bing Chat, Bing Image Creator, Compose, etc.</td>
36
- </tr>
37
- <tr>
38
- <td>Shopping</td>
39
- <td>Coupons, Price comparison, Shopping hub, etc.</td>
40
- </tr>
41
- </table>
42
- <p>You can learn more about these features and how to use them from the Microsoft Edge Features & Tips page . You can also access the Settings > Experiments page in DevTools to turn on or off experimental features that are still in development . These experiments could be unstable or unreliable and may require you to restart DevTools.</p>
43
- <h2>How to download and install Edge 80 on your device?</h2>
44
- <p>If you are convinced that Edge 80 is the browser for you, you might be wondering how to download and install it on your device. The good news is that it is very easy and straightforward. Here are the steps to follow:</p>
45
- <h3>Go to the Microsoft Edge web page and choose your device type (Windows, macOS, iOS, or Android)</h3>
46
- <p>The first step is to go to the Microsoft Edge web page and choose your device type from the options available. You can download Edge 80 for Windows 10 / 8.1 / 8 / 7 (32-bit or 64-bit), macOS (Intel or Apple Silicon), iOS (iPhone or iPad), or Android (phone or tablet). You can also download Edge Dev for Windows 11 / 10 or Edge for Linux (.deb) or Linux (.rpm) if you want to try the preview build channels . These channels are updated weekly and may have bugs or issues.</p>
47
- <h3>Follow the instructions to download and install Edge 80 on your device</h3>
48
- <p>The next step is to follow the instructions to download and install Edge 80 on your device. Depending on your device type, you may need to run the installer file, accept the license agreement, choose a location for installation, and restart your device. The installation process should not take more than a few minutes. Once it is done, you can launch Edge 80 from your desktop or start menu.</p>
49
- <h3>You can also import your data from another browser if you want to switch to Edge 80</h3>
50
- <p>If you want to switch to Edge 80 from another browser, such as Chrome or Firefox, you can also import your data from that browser. This includes your bookmarks, history, passwords, settings, and more. To do this, you can go to Settings > Profiles > Import browser data in Edge 80 and choose the browser and data types you want to import. You can also choose whether to make Edge 80 your default browser during the installation process or later from the Settings > Default browser page.</p>
51
- <h2>How to customize and use Edge 80 to suit your needs?</h2>
101
- <p>Now that you have downloaded and installed Edge 80 on your device, you might want to customize and use it to suit your needs. There are many ways you can do this. Here are some examples:</p>
102
- <h3>You can change the default search engine from Bing to Google or any other search engine you prefer</h3>
103
- <p>By default, Edge 80 uses Bing as its default search engine. Bing is a powerful and intelligent search engine that offers many features and benefits. However, if you prefer another search engine, such as Google or DuckDuckGo, you can change it easily. To do this, you can go to Settings > Privacy, search and services > Address bar and search > Search engine used in the address bar and choose the search engine you want. You can also add a new search engine if it is not listed there.</p>
104
- <h3>You can use Collections, Vertical tabs, Sidebar, Webhint, and other productivity features to organize and enhance your browsing experience</h3>
105
- <p>Edge 80 has many productivity features that can help you organize and enhance your browsing experience. For example:</p>
106
- <ul>
107
- <li>You can use Collections to collect and organize web content such as images, text, links, videos, etc. You can also export your collections to Word or Excel documents.</li>
108
- <li>You can use Vertical tabs to switch between tabs in a vertical layout on the left side of the browser. This can help you see more tabs at once and manage them easily.</li>
165
- <li>You can use Sidebar to access your favorites, history, collections, and downloads in a slide-out panel on the right side of the browser. This can help you access your web content without leaving the current page.</li>
166
- <li>You can use Webhint to get suggestions and feedback on how to improve your website's performance, accessibility, security, and more. This can help you optimize your website for better user experience and SEO.</li>
167
- </ul>
168
- <h3>You can use Clarity Boost, Gaming homepage, PC gaming performance, and other gaming features to enjoy a better gaming experience on the web</h3>
169
- <p>Edge 80 has many gaming features that can help you enjoy a better gaming experience on the web. For example:</p>
170
- <ul>
171
- <li>You can use Clarity Boost to enhance the contrast and clarity of the images and videos on the web. This can help you see more details and colors in your games.</li>
172
- <li>You can use Gaming homepage to access a curated collection of games, news, reviews, videos, and more from the web. This can help you discover new games and stay updated on the latest gaming trends.</li>
173
- <li>You can use PC gaming performance to optimize your PC settings for better gaming performance. This can help you improve your frame rate, resolution, and graphics quality in your games.</li>
174
- </ul>
175
- <h3>You can use Microsoft Defender SmartScreen, Password monitor, and other security features to protect yourself from malware, phishing, and other online threats</h3>
176
- <p>Edge 80 has many security features that can help you protect yourself from malware, phishing, and other online threats. For example:</p>
177
- <ul>
178
- <li>You can use Microsoft Defender SmartScreen to warn you about unsafe websites and downloads. This can help you avoid malware, phishing, and other online scams.</li>
179
- <li>You can use Password monitor to alert you if your passwords are found in data breaches. This can help you change your passwords and secure your accounts.</li>
180
- <li>You can use InPrivate browsing to browse the web without saving your browsing history, cookies, or other data. This can help you protect your privacy and prevent tracking.</li>
181
- </ul>
182
- <h3>You can use Bing Chat, Bing Image Creator, Compose, and other AI-powered features to get more out of your web searches and interactions</h3>
183
- <p>Edge 80 has many AI-powered features that can help you get more out of your web searches and interactions. For example:</p>
184
- <ul>
185
- <li>You can use Bing Chat to chat with an AI assistant that can answer your questions, give you suggestions, and perform tasks for you. This can help you save time and get things done faster.</li>
186
- <li>You can use Bing Image Creator to create custom images from text or emojis. This can help you express yourself creatively and share your images with others.</li>
187
- <li>You can use Compose to generate text from keywords or phrases. This can help you write faster and easier with AI assistance.</li>
188
- </ul>
189
- <h3>You can use Coupons, Price comparison, and other shopping features to save money and time when shopping online</h3>
190
- <p>Edge 80 has many shopping features that can help you save money and time when shopping online. For example:</p>
191
- <ul>
192
- <li>You can use Coupons to find and apply coupons for the products or services you want to buy. This can help you save money on your purchases.</li>
193
- <li>You can use Price comparison to compare prices from different sellers for the same product. This can help you find the best deal for your budget.</li>
194
- <li>You can use Shopping hub to access a personalized dashboard of your shopping activities, such as your wish list, cart, orders, and more. This can help you manage your shopping easier and faster.</li>
195
- </ul>
196
- <h2>Conclusion</h2>
197
- <p>In conclusion, Edge 80 is a fast, secure, and modern browser that offers a lot of features and benefits. It is based on Chromium and offers better compatibility, performance, and security than Internet Explorer and Microsoft Edge Legacy. It also has many unique features that enhance your browsing experience in various aspects such as performance, security, productivity, gaming, AI-powered, and shopping. You can download and install Edge 80 on your device easily and customize and use it to suit your needs. If you want to get the latest version of Microsoft's browser, you should download Edge 80 today.</p>
198
- <h2>FAQs</h2>
199
- <p>Here are some frequently asked questions about Edge 80:</p>
200
- <h3>Is Edge 80 free?</h3>
201
- <p>Yes, Edge 80 is free to download and use. You do not need to pay anything to use Edge 80.</p>
202
- <h3>Is Edge 80 safe?</h3>
203
- <p>Yes, Edge 80 is safe to use. It has many security features that protect you from malware, phishing, and other online threats. It also respects your privacy and does not collect or share your personal data without your consent.</p>
204
- <h3>Is Edge 80 compatible with Chrome extensions?</h3>
205
- <p>Yes, Edge 80 is compatible with most Chrome extensions. You can install Chrome extensions from the Chrome Web Store or the Microsoft Edge Add-ons Store . You can also manage your extensions from the Settings > Extensions page in Edge 80.</p>
206
- <h3>How do I update Edge 80?</h3>
207
- <p>Edge 80 updates automatically in the background. You do not need to do anything to update Edge 80. However, you can also check for updates manually from the Settings > About Microsoft Edge page in Edge 80. You can also switch to a different build channel if you want to get more frequent or less frequent updates.</p>
208
- <h3>How do I uninstall Edge 80?</h3>
209
- <p>If you want to uninstall Edge 80 from your device, you can follow the steps below:</p>
210
- <ul>
211
- <li>For Windows: Go to Settings > Apps > Apps & features and find Microsoft Edge in the list. Click on it and then click on Uninstall. Follow the instructions to complete the uninstallation process.</li>
212
- <li>For macOS: Go to Finder > Applications and drag Microsoft Edge to the Trash. Empty the Trash to complete the uninstallation process.</li>
213
- <li>For iOS: Tap and hold on the Microsoft Edge app icon on your home screen until it starts to jiggle. Tap on the X icon on the top left corner of the app icon and then tap on Delete. Confirm your action to complete the uninstallation process.</li>
214
- <li>For Android: Go to Settings > Apps & notifications > See all apps and find Microsoft Edge in the list. Tap on it and then tap on Uninstall. Confirm your action to complete the uninstallation process.</li>
215
- </ul></p> 197e85843d<br />
216
- <br />
217
- <br />
spaces/1phancelerku/anime-remove-background/ 2023 PDF .md DELETED
@@ -1,175 +0,0 @@
1
- <br />
2
- <h1>UPSC Syllabus PDF Download 2023 in Hindi</h1>
3
- <p>यूनियन पब्लिक सर्विस कमीशन (UPSC) भारत की सबसे प्रतिष्ठित और कठिन परीक्षा है। इस परीक्षा के माध्यम से भारतीय प्रशासनिक सेवा (IAS), भारतीय पुलिस सेवा (IPS), भारतीय वन सेवा (IFS) और अन्य केंद्रीय सेवाओं में भर्ती की जाती है। UPSC परीक्षा को तीन चरणों में आयोजित किया जाता है- प्रारंभिक परीक्षा (Prelims), मुख्य परीक्षा (Mains) और साक्षात्कार (Interview)। प्रत्येक चरण का अपना एक पैटर्न और पाठ्यक्रम होता है। UPSC परीक्षा की तैयारी करने के लिए सबसे महत्वपूर्ण है कि उम्मीदवारों को पूरे पाठ्यक्रम को समझना और उसके हिसाब से पढ़ाई करना होता है।</p>
4
- <h2>UPSC Syllabus for Prelims 2023 in Hindi</h2>
5
- <p>UPSC Prelims 2023 के लिए पाठ्यक्रम में कोई बदलाव नहीं हुआ है। UPSC Prelims 2023 में दो पेपर होंगे- General Studies Paper 1 और General Studies Paper 2 (CSAT)। प्रत्येक पेपर में 100 प्रश्न होंगे, जिनके लिए 2-2 घंटे का समय मिलेगा। General Studies Paper 1 में 200 अंक, और General Studies Paper 2 में 200 अंक होंगे। General Studies Paper 2 (CSAT) में कम से कम 33% (66) अंक प्राप्त करना अनिवार्य है, वरना General Studies Paper 1 का मूल्यांकन नहीं होगा। General Studies Paper 1 के माध्यम से ही Prelims के मेरिट सूची में सम्मिलित होने के लिए Cut Off Marks निर्धारित किए जाएंगे।</p>
6
- <h3>General Studies Paper 1</h3>
8
- <p>General Studies Paper 1 में निम्नलिखित Topics and Subtopics Cover होंगे:</p>
9
- <ul>
10
- <li>History of India and Indian National Movement: Ancient India, Medieval India, Modern India, Indian Freedom Struggle, Post-Independence India</li>
11
- <li>Indian and World Geography: Physical Geography, Human Geography, Economic Geography, Regional Geography</li>
12
- <li>Indian Polity and Governance: Constitution of India, Fundamental Rights and Duties, Directive Principles of State Policy, Union Government, State Government, Local Government, Judiciary, Election Commission, Comptroller and Auditor General, Attorney General and Solicitor General, Union Public Service Commission, Public Policy, Rights Issues, etc.</li>
13
- <li>Indian Economy and Social Development: Basic Economic Concepts, National Income and Accounting, Planning and Budgeting, Poverty and Unemployment, Inflation and Monetary Policy, Fiscal Policy and Taxation, Banking and Financial Sector, Agriculture and Rural Development, Industry and Infrastructure, Services Sector, Foreign Trade and Balance of Payments, International Institutions and Organizations, Social Sector Initiatives, Sustainable Development Goals, etc.</li>
14
- <li>Environmental Ecology, Biodiversity and Climate Change: Ecology and Ecosystems, Biodiversity and Conservation, Environmental Issues and Pollution, Climate Change and Mitigation, Environmental Laws and Policies, International Environmental Conventions and Protocols, etc.</li>
15
- <li>General Science and Technology: Physics, Chemistry, Biology, Biotechnology, Space Technology, Defence Technology, Information Technology, Nuclear Technology, Nanotechnology, etc.</li>
16
- <li>Current Events of National and International Importance: Important News and Events related to the above Topics and Subtopics</li>
17
- </ul>
18
- <h3>General Studies Paper 2 (CSAT)</h3>
19
- <p>General Studies Paper 2 (CSAT) में निम्नलिखित Topics and Subtopics Cover होंगे:</p>
20
- <ul>
21
- <li>Comprehension: Reading Comprehension of Passages in Hindi and English</li>
22
- <li>Logical Reasoning and Analytical Ability: Statement and Assumptions, Statement and Arguments, Statement and Conclusions, Syllogism, Analogies, Coding-Decoding, Blood Relations, Direction Sense Test, Ranking Test, Seating Arrangement, Puzzles, etc.</li>
23
- <li>Decision Making and Problem Solving: Decision Making based on given Situations or Case Studies, Problem Solving based on given Data or Information</li>
24
- <li>Basic Numeracy: Numbers and their Relations, Order of Magnitude, Percentage, Ratio and Proportion, Average, Profit and Loss, Simple Interest and Compound Interest, Time and Work, Time and Distance, etc.</li>
25
- <li>Data Interpretation: Interpretation and Analysis of Data presented in Tables, Charts, Graphs, etc.</li>
26
- <li>General Mental Ability: General Intelligence and Mental Ability Questions</li>
27
- <li>Basic English Language Skills: Grammar, Vocabulary, Synonyms and Antonyms, Idioms and Phrases, etc.</li>
28
- </ul>
29
- <h2>UPSC Syllabus for Mains 2023 in Hindi</h2>
30
- <p>UPSC Mains 2023 के लिए पाठ्यक्रम में भी कोई बदलाव नहीं हुआ है। UPSC Mains 2023 में कुल 9 पेपर होंगे, जिनमें से 2 पेपर Qualifying Nature के होंगे और 7 पेपर Merit Ranking के लिए Count होंगे। Qualifying Papers में Indian Language और English होंगे, जिनके लिए 300-300 अंक होंगे। Merit Papers में Essay, General Studies 1, General Studies 2, General Studies 3, General Studies 4, Optional Paper 1 और Optional Paper 2 होंगे, जिनके लिए 250-250 अंक होंगे। प्रत्येक पेपर का समय 3 घंटे होगा।</p>
31
- <h3>Qualifying Papers</h3>
32
- <p>Qualifying Papers में Indian Language और English होंगे। Indian Language का पेपर UPSC की सूची में से किसी भी भारतीय भाषा में हो सकता है। English का पेपर English Language में होगा। Qualifying Papers का मार्क्स Merit Ranking में Count नहीं होगा, लेकिन Qualify करना अनिवार्य होगा। Qualifying Papers में कम से कम 25% (75) अंक प्राप्त करना होगा। Qualifying Papers में Comprehension, Precis Writing, Translation, Grammar, Vocabulary, etc. Cover होंगे।</p>
33
- <h3>Papers to be counted for Merit</h3>
77
- <p>Papers to be counted for Merit में Essay, General Studies 1, General Studies 2, General Studies 3, General Studies 4, Optional Paper 1 और Optional Paper 2 होंगे। Essay का पेपर Hindi या English में हो सकता है। General Studies के पेपर English में ही होंगे। Optional Paper का पेपर UPSC की सूची में से किसी भी Subject में हो सकता है। Papers to be counted for Merit का मार्क्स Merit Ranking में Count होगा। Papers to be counted for Merit में Essay Writing, Analytical and Critical Writing, Current Affairs and General Knowledge, Conceptual and Theoretical Understanding, Subject Knowledge and Application, etc. Cover होंगे।</p>
78
- <h2>UPSC Syllabus for Interview 2023 in Hindi</h2>
79
- <p>UPSC Interview 2023 के लिए पाठ्यक्रम में भी कोई बदलाव नहीं हुआ है। UPSC Interview 2023 में Personality Test होगा, जिसके लिए 275 अंक होंगे। Personality Test में UPSC की Board Members के सामने साक्षात्कार होगा, जिसमें उम्मीदवार की Personality, Mental Alertness, Critical Powers of Assimilation, Clear and Logical Exposition, Balance of Judgement, Variety and Depth of Interest, Ability for Social Cohesion and Leadership, Intellectual and Moral Integrity, etc. का मूल्यांकन किया जाएगा। Personality Test की तैयारी के लिए उम्मीदवारों को अपने DAF (Detailed Application Form) को अच्छी तरह से पढ़ना, Current Affairs और General Knowledge को Update रखना, Mock Interviews लेना, Confidence और Communication Skills को Improve करना, Honesty और Integrity को Demonstrate करना, etc. होगा।</p>
80
- <h2>How to Download UPSC Syllabus PDF in Hindi 2023</h2>
81
- <p>UPSC Syllabus PDF in Hindi 2023 को Download करने के लिए निम्नलिखित Steps Follow करें:</p>
82
- <ol>
83
- <li>UPSC की Official Website <a href="">www.upsc.gov.in</a> पर Visit करें।</li>
84
- <li>Home Page पर Examinations Tab पर Click करें।</li>
85
- <li>Examinations Tab में Active Examinations Link पर Click करें।</li>
86
- <li>Active Examinations List में Civil Services (Preliminary) Examination 2023 Link पर Click करें।</li>
87
- <li>Civil Services (Preliminary) Examination 2023 Page पर Notice Link पर Click करें।</li>
88
- <li>Notice Page पर Hindi Version Link पर Click करें।</li>
89
- <li>Hindi Version Page पर UPSC Syllabus PDF in Hindi 2023 Download हो जाएगा।</li>
90
- </ol>
91
- <p>UPSC Syllabus PDF in Hindi 2023 को Download करने के लिए Other Sources भी Available हैं, जैसे Online Coaching Websites, Online Study Portals, Online Booksellers, etc. लेकिन UPSC Syllabus PDF in Hindi 2023 Download करते समय सत्यापन और सुरक्षा का ध्यान रखें।</p>
92
- <h2>How to Prepare for UPSC Exam 2023 in Hindi</h2>
93
- <p>UPSC Exam 2023 in Hindi की तैयारी के लिए Books and Resources, Tips and Strategies का उपयोग करना चाहिए। Books and Resources में निम्नलिखित सुझाए गए हैं:</p>
94
- <h3>Books and Resources</h3>
95
- <table>
96
- <tr>
97
- <th>Subject</th>
98
- <th>Books and Resources</th>
99
- </tr>
100
- <tr>
101
- <td>History</td>
102
- <td>NCERT Books (Class 6 to 12), Spectrum's A Brief History of Modern India, Bipin Chandra's India's Struggle for Independence, etc.</td>
103
- </tr>
104
- <tr>
105
- <td>Geography</td>
106
- <td>NCERT Books (Class 6 to 12), G C Leong's Certificate Physical and Human Geography, Majid Hussain's Geography of India, etc.</td>
107
- </tr>
108
- <tr>
109
- <td>Polity</td>
110
- <td>M Laxmikanth's Indian Polity, D D Basu's Introduction to the Constitution of India, Subhash Kashyap's Our Constitution, etc.</td>
111
- </tr>
112
- <tr>
113
- <td>Economy</td>
114
- <td>NCERT Books (Class 9 to 12), Ramesh Singh's Indian Economy, Sanjiv Verma's The Indian Economy, Economic Survey, Budget, etc.</td>
115
- </tr>
116
- <tr>
117
- <td>Environment</td>
118
- <td>NCERT Books (Class 12 Biology), Shankar IAS Academy's Environment, Ministry of Environment and Forests' Reports, etc.</td>
119
- </tr>
120
- <tr>
121
- <td>Science and Technology</td>
122
- <td>NCERT Books (Class 6 to 10), Science Reporter Magazine, The Hindu Science and Technology Page, etc.</td>
123
- </tr>
124
- <tr>
125
- <td>Current Affairs</td>
126
- <td>The Hindu Newspaper, Yojana Magazine, Kurukshetra Magazine, Rajya Sabha TV Debates, All India Radio News Analysis, etc.</td>
127
- </tr>
128
- <tr>
129
- <td>CSAT</td>
130
- <td>R S Aggarwal's A Modern Approach to Verbal and Non-Verbal Reasoning, R S Aggarwal's Quantitative Aptitude for Competitive Examinations, Arihant's CSAT Paper 2 Practice Sets, etc.</td>
131
- </tr>
132
- <tr>
133
- <td>Essay</td>
134
- <td>Yojana Magazine, The Hindu Editorial Page, Rajya Sabha TV Big Picture, etc.</td>
135
- </tr>
136
- <tr>
137
- <td>Optional Subject</td>
138
- <td>Depends on the Choice of the Candidate. Refer to the UPSC Syllabus PDF in Hindi 2023 for the Detailed Syllabus of Each Optional Subject.</td>
139
- </tr>
140
- </table>
141
- <h3>Tips and Strategies</h3>
142
- <p>Tips and Strategies में निम्नलिखित सुझाए गए हैं:</p>
143
- <ul>
144
- <li>UPSC Syllabus PDF in Hindi 2023 को अच्छी तरह से पढ़ें और उसके हिसाब से Study Plan बनाएं।</li>
145
- <li>Newspaper पढ़ना रोजाना की आदत बनाएं। Newspaper से Current Affairs, General Knowledge, Vocabulary, etc. Improve होंगे।</li>
146
- <li>Notes बनाना और Revise करना न भूलें। Notes से Concepts Clear होंगे, Memory Boost होगी, और Revision Easy होगा।</li>
147
- <li>Previous Year Papers Solve करें। Previous Year Papers से Exam Pattern, Question Trend, Difficulty Level, Time Management, etc. का पता चलेगा।</li>
148
- <li>Mock Tests Attempt करें। Mock Tests से Preparation Level, Strengths and Weaknesses, Accuracy and Speed, etc. का पता चलेगा।</li>
149
- <li>Regularly Update करें। Regularly Update से Current Affairs, General Knowledge, New Developments, etc. का पता चलेगा।</li>
150
- <li>Self Study करें। Self Study से Confidence, Self Reliance, Self Motivation, etc. Increase होंगे।</li>
151
- <li>Guidance लें। Guidance से Doubts Clear होंगे, Tips and Tricks मिलेंगे, Feedback मिलेगा, etc.</li>
152
- <li>Health and Fitness का ध्यान रखें। Health and Fitness से Stress Relief होगा, Concentration Improve होगा, Energy Level Maintain होगा, etc.</li>
153
- </ul>
154
- <h2>Conclusion</h2>
155
- <p>UPSC Exam 2023 in Hindi की तैयारी करने के लिए UPSC Syllabus PDF in Hindi 2023 Download करना बहुत जरूरी है। UPSC Syllabus PDF in Hindi 2023 Download करने के बाद UPSC Syllabus PDF in Hindi 2023 को समझना और उसके हिसाब से Study Plan बनाना होगा। UPSC Syllabus PDF in Hindi 2023 में Prelims, Mains और Interview के लिए पूरे पाठ्यक्रम को विस्तार से दिया गया है। UPSC Syllabus PDF in Hindi 2023 को Download करने के लिए Official Website या Other Sources से Link मिल सकता है। UPSC Exam 2023 in Hindi की तैयारी के लिए Books and Resources, Tips and Strategies का भी प्रयोग करना चाहिए। UPSC Exam 2023 in Hindi में सफल होने के लिए Hard Work, Dedication, Consistency, Revision, Practice, etc. की जरूरत होती है।</p>
156
- <h2>FAQs</h2>
157
- <p>Q1. UPSC Syllabus PDF in Hindi 2023 कब Release होगा?</p>
158
- <p>A1. UPSC Syllabus PDF in Hindi 2023 Civil Services (Preliminary) Examination 2023 के साथ ही Release होगा, जो Expected है कि June 2023 में होगा।</p>
159
- <p>Q2. UPSC Syllabus PDF in Hindi 2023 में Prelims, Mains और Interview के लिए Same पाठ्यक्रम होगा?</p>
160
- <p>A2. No, UPSC Syllabus PDF in Hindi 2023 में Prelims, Mains और Interview के लिए Different पाठ्यक्रम होंगे। Prelims में General Studies Paper 1 और General Studies Paper 2 (CSAT) होंगे। Mains में Essay, General Studies 1, General Studies 2, General Studies 3, General Studies 4, Optional Paper 1 और Optional Paper 2 होंगे। Interview में Personality Test होगा।</p>
161
- <p>Q3. UPSC Syllabus PDF in Hindi 2023 में Optional Subject कैसे Choose करें?</p>
162
- <p>A3. UPSC Syllabus PDF in Hindi 2023 में Optional Subject Choose करने के लिए निम्नलिखित Factors Consider करें:</p>
163
- <ul>
164
- <li>Interest and Aptitude: Optional Subject में Interest and Aptitude होना चाहिए, जिससे Study Enjoyable and Easy हो।</li>
165
- <li>Syllabus and Difficulty Level: Optional Subject का Syllabus and Difficulty Level Manageable होना चाहिए, जिससे Study Overwhelming and Difficult न हो।</li>
166
- <li>Availability and Quality of Books and Resources: Optional Subject के लिए Books and Resources Easily Available and Good Quality के होने चाहिए, जिससे Study Effective and Efficient हो।</li>
167
- <li>Scoring Potential and Success Rate: Optional Subject का Scoring Potential and Success Rate High होना चाहिए, जिससे Merit Ranking Improve हो।</li>
168
- <li>Overlap with General Studies: Optional Subject का General Studies के साथ Overlap होना चाहिए, जिससे Syllabus Coverage and Revision Easy हो।</li>
169
- </ul>
170
- <p>Q4. UPSC Syllabus PDF in Hindi 2023 में Negative Marking होगी?</p>
171
- <p>A4. Yes, UPSC Syllabus PDF in Hindi 2023 में Negative Marking होगी। Prelims में प्रत्येक Wrong Answer के लिए 1/3 Marks Deduct होंगे। Mains में प्रत्येक Irrelevant or Misleading Answer के लिए Marks Deduct हो सकते हैं। Interview में Negative Marking नहीं होगी।</p>
172
- <p>Q5. UPSC Syllabus PDF in Hindi 2023 में Language Papers का महत्व क्या है?</p>
173
- <p>A5. UPSC Syllabus PDF in Hindi 2023 में Language Papers का महत्व बहुत है। Language Papers में Indian Language और English होंगे, जो Qualifying Nature के होंगे। Language Papers में Comprehension, Precis Writing, Translation, Grammar, Vocabulary, etc. Cover होंगे। Language Papers में कम से कम 25% (75) अंक प्राप्त करना होगा, वरना Mains के Other Papers का Evaluation नहीं होगा। Language Papers से Candidates की Language Proficiency, Communication Skills, Expression Power, etc. का पता चलता है।</p> 401be4b1e0<br />
174
- <br />
175
- <br />
spaces/1phancelerku/anime-remove-background/7 Easy Ways to Improve Your Download Speed Right Now.md DELETED
@@ -1,97 +0,0 @@
1
- <br />
2
- <h1>How to Fix Slow Download Speed and Enjoy Faster Internet</h1>
3
- <p>Have you ever experienced the frustration of waiting for a file, a video, or a website to load, only to see the progress bar crawling at a snail's pace? If so, you are not alone. Slow download speed is a common problem that can affect your online activities, productivity, and entertainment. But don't worry, there are ways to fix it and enjoy faster internet.</p>
4
- <h2>What is download speed and why does it matter?</h2>
6
- <p>Download speed is the rate at which data is transferred from the internet to your device. It is measured in megabits per second (Mbps). The higher the download speed, the faster you can access online content, such as images, videos, audio, files, and text. Download speed is important for various online activities, such as browsing the web, streaming media, downloading files, gaming, video conferencing, and more. A slow download speed can result in poor quality, buffering, interruptions, errors, and long waiting times.</p>
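<p>To put the unit in perspective: since a byte is 8 bits, a 25 Mbps connection moves roughly 3 MB of data per second, so a 300 MB file would take about 100 seconds to download at full speed, and proportionally longer on a slower link.</p>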
7
- <h3>How to measure your download speed</h3>
8
- <p>The easiest way to check your download speed is to use an online tool that can test your internet connection. You can simply type "internet speed test" into Google and click the "run speed test" button. This will show you how many Mbps your device is receiving from your internet service provider (ISP). You can also use other websites or apps that offer similar services, such as Speedtest.net or Fast.com.</p>
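<p>If you would rather check from a script than a website, you can get a rough estimate by timing a plain HTTP download, as in the minimal Python sketch below. This is only an approximation of what dedicated speed-test services measure, and the test URL is a placeholder: replace it with any large file you trust that is hosted reasonably close to you.</p>
<pre><code>
# Rough download-speed estimate: time how long part of a test file takes to fetch.
# TEST_URL is a placeholder assumption; point it at any large file you trust.
import time
import urllib.request

TEST_URL = "https://example.com/large-test-file.bin"  # placeholder, replace before running
CHUNK = 64 * 1024  # read 64 KiB at a time

def measure_mbps(url: str, max_bytes: int = 20 * 1024 * 1024) -> float:
    """Download up to max_bytes from url and return the observed speed in Mbps."""
    received = 0
    start = time.monotonic()
    with urllib.request.urlopen(url) as response:
        while received < max_bytes:
            chunk = response.read(CHUNK)
            if not chunk:
                break
            received += len(chunk)
    elapsed = time.monotonic() - start
    # bits transferred divided by (seconds * 1,000,000) gives megabits per second
    return (received * 8) / (elapsed * 1_000_000)

if __name__ == "__main__":
    print(f"Approximate download speed: {measure_mbps(TEST_URL):.1f} Mbps")
</code></pre>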
9
- <p>According to the Federal Communication Commission (FCC), a good download speed is at least 25 Mbps for most online activities. However, this may vary depending on how many devices and users are on your network at the same time, and what kind of content you are accessing. For example, if you want to stream HD video or play online games, you may need a higher download speed than if you just want to check your email or social media.</p>
10
- <h3>What causes slow download speed?</h3>
11
- <p>There are many factors that can affect your download speed, some of which are related to your device, some to your network, and some to your ISP. Here are some of the most common causes of slow download speed:</p>
12
- <ul>
13
- <li>Your device is overloaded with too many apps, programs, or browser tabs running at the same time.</li>
14
- <li>Your device is infected with malware or viruses that are slowing it down or stealing your bandwidth.</li>
15
- <li>Your device is outdated or has low memory or storage space.</li>
16
- <li>Your router or modem is faulty, old, or poorly configured.</li>
17
- <li>Your Wi-Fi signal is weak, unstable, or interfered by other devices or objects.</li>
18
- <li>Your internet plan is not suitable for your needs or usage habits.</li>
19
- <li>Your ISP is experiencing congestion, outages, or throttling.</li>
20
- <li>Your DNS server is slow or unreliable.</li>
21
- <li>You are downloading large files or multiple files at the same time.</li>
22
- <li>You are downloading from a slow or distant server.</li>
23
- </ul>
24
- <h3>How to increase your download speed: 15 tips and tricks</h3>
25
- <p>The good news is that there are many ways to improve your download speed and enjoy faster internet. Some of them are simple and easy to do yourself, while others may require more technical skills or professional assistance. Here are 15 tips and tricks that can help you increase your download speed:</p>
26
- <h4>Restart your device</h4>
27
- <p>This is one of the simplest and most effective ways to fix slow download speed. Restarting your device can clear any temporary files, cache data, or background processes that may be slowing it down. It can also refresh your connection to the network.</p>
- <h4>Test your internet speed</h4>
28
- <p>Another simple way to diagnose slow download speed is to test your internet speed using an online tool, as mentioned above. This can help you determine if the problem is with your device, your network, or your ISP. You can also compare your actual download speed with the speed that your ISP promises you in your internet plan. If there is a significant difference, you may want to contact your ISP and ask for an explanation or a solution.</p>
29
- <h4>Upgrade your internet plan</h4>
30
- <p>If your download speed is consistently lower than what you need or expect, it may be time to upgrade your internet plan. You can check with your ISP if they offer faster or more reliable plans that suit your needs and budget. You can also shop around and compare different ISPs and their plans to find the best deal. However, keep in mind that upgrading your internet plan may not guarantee faster download speed if there are other issues with your device or network.</p>
31
- <h4>Disable other devices connected to your router</h4>
32
- <p>One of the main factors that can affect your download speed is the number of devices and users that are connected to your router at the same time. The more devices and users, the more bandwidth is consumed and the slower the download speed. Therefore, you can try to disable or disconnect any devices that are not in use or that are not essential for your online activity. For example, you can turn off your smart TV, gaming console, printer, or other smart devices that may be using your Wi-Fi.</p>
33
- How to compress or zip files before downloading them</p>
73
- <h4>Clear your browser's cache and cookies</h4>
74
- <p>Your browser's cache and cookies are files that store information about the websites you visit and the data you enter. They can help improve your browsing experience by loading web pages faster and remembering your preferences. However, they can also accumulate over time and take up space on your device, which can slow down your download speed. Therefore, you can try to clear your browser's cache and cookies regularly to free up some space and speed up your downloads. You can do this by going to your browser's settings and finding the option to clear browsing data.</p>
75
- <h4>Use a wired connection instead of Wi-Fi</h4>
76
- <p>Wi-Fi is convenient and wireless, but it is also prone to interference, instability, and signal loss. These factors can reduce your download speed and affect your online experience. Therefore, if possible, you can try to use a wired connection instead of Wi-Fi for faster and more reliable downloads. You can do this by connecting your device directly to your router or modem using an Ethernet cable. This can eliminate any Wi-Fi issues and ensure a stable and secure connection.</p>
77
- <h4>Scan your device for malware and viruses</h4>
78
- <p>Malware and viruses are malicious software that can infect your device and cause various problems, such as slowing it down, stealing your data, or displaying unwanted ads. They can also affect your download speed by consuming your bandwidth or redirecting your traffic to malicious servers. Therefore, you should scan your device regularly for any malware or viruses and remove them as soon as possible. You can use a reputable antivirus software or app to do this, and make sure to keep it updated and active.</p>
79
- <h4>Pause or cancel other downloads and updates</h4>
80
- <p>Another factor that can affect your download speed is the number of downloads and updates that are running on your device or network at the same time. The more downloads and updates, the more bandwidth is used and the slower the download speed. Therefore, you can try to pause or cancel any downloads and updates that are not urgent or important for your online activity. For example, you can pause or cancel any software updates, app downloads, file transfers, or cloud backups that are not essential.</p>
81
- <h4>Change your DNS server</h4>
82
- <p>A DNS server is a service that translates domain names (such as www.google.com) into IP addresses (such as 172.217.14.206) that your device can understand and connect to. Your ISP usually assigns you a default DNS server, but it may not be the fastest or most reliable one. Therefore, you can try to change your DNS server to a different one that may offer better performance and security. You can do this by going to your device's network settings and finding the option to change your DNS server. You can choose from various free or paid DNS servers, such as Google Public DNS, Cloudflare DNS, or OpenDNS.</p>
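<p>One quick way to see whether your current DNS server is part of the problem is to time a few lookups. The Python sketch below uses only the standard library and whichever resolver your operating system is already configured with; the domain names are arbitrary examples, and repeated runs may be answered from the local cache, so treat the numbers as a rough indication only.</p>
<pre><code>
# Time how long your current DNS resolver takes to answer a few lookups.
# Consistently slow results suggest the resolver, not the download server,
# may be the bottleneck. The domains below are arbitrary examples.
import socket
import time

DOMAINS = ["example.com", "wikipedia.org", "github.com"]

def time_lookup(name: str) -> float:
    """Return the time in milliseconds taken to resolve one hostname."""
    start = time.monotonic()
    socket.getaddrinfo(name, 443)  # asks the system-configured DNS resolver
    return (time.monotonic() - start) * 1000

if __name__ == "__main__":
    for domain in DOMAINS:
        try:
            print(f"{domain}: {time_lookup(domain):.1f} ms")
        except socket.gaierror as err:
            print(f"{domain}: lookup failed ({err})")
</code></pre>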
83
- <h4>Use a download manager or accelerator</h4>
84
- <p>A download manager or accelerator is a software or app that can help you speed up your downloads by using various techniques, such as splitting files into smaller parts, resuming interrupted downloads, optimizing connections, or finding faster servers. You can use a download manager or accelerator to download large files or multiple files at the same time more efficiently and quickly. You can find various download managers or accelerators online, such as Internet Download Manager, Free Download Manager, or Download Accelerator Plus.</p>
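<p>The main trick behind most download managers is splitting a file into byte ranges and fetching the parts in parallel. The Python sketch below illustrates that idea only; it is not how any of the products named above actually work internally, it assumes the server honours HTTP Range requests, and the URL is a placeholder pointing at a reasonably large file.</p>
<pre><code>
# Minimal sketch of segmented downloading: split the file into byte ranges
# and fetch them in parallel. Assumes the server supports HTTP Range requests
# and reports a Content-Length; URL is a placeholder.
import concurrent.futures
import urllib.request

URL = "https://example.com/big-file.zip"  # placeholder, replace before running
PARTS = 4

def content_length(url: str) -> int:
    request = urllib.request.Request(url, method="HEAD")
    with urllib.request.urlopen(request) as response:
        return int(response.headers["Content-Length"])

def fetch_range(url: str, start: int, end: int) -> bytes:
    request = urllib.request.Request(url, headers={"Range": f"bytes={start}-{end}"})
    with urllib.request.urlopen(request) as response:
        return response.read()

def download(url: str, parts: int = PARTS) -> bytes:
    size = content_length(url)
    step = size // parts
    ranges = [(i * step, size - 1 if i == parts - 1 else (i + 1) * step - 1)
              for i in range(parts)]
    # Threads are fine here because the work is network-bound, not CPU-bound.
    with concurrent.futures.ThreadPoolExecutor(max_workers=parts) as pool:
        chunks = list(pool.map(lambda r: fetch_range(url, *r), ranges))
    return b"".join(chunks)  # map() preserves order, so parts reassemble correctly

if __name__ == "__main__":
    data = download(URL)
    print(f"Downloaded {len(data)} bytes in {PARTS} parts")
</code></pre>
<p>A real download manager would go further than this sketch: it would resume failed ranges, write each part straight to disk instead of holding it in memory, and fall back to a single connection when the server does not support ranges.</p>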
85
- <h2>Conclusion</h2>
86
- <p>Slow download speed can be a frustrating and annoying problem that can affect your online experience and productivity. However, there are many ways to fix it and enjoy faster internet. In this article, we have shared 15 tips and tricks that can help you increase your download speed, such as restarting your device, testing your internet speed, upgrading your internet plan, disabling other devices connected to your router, clearing your browser's cache and cookies, using a wired connection instead of Wi-Fi, scanning your device for malware and viruses, pausing or canceling other downloads and updates, changing your DNS server, and using a download manager or accelerator. We hope that these tips and tricks will help you solve your slow download speed problem and improve your online experience.</p>
87
- <h2>FAQs</h2>
88
- <p>Here are some frequently asked questions about slow download speed and how to fix it:</p>
89
- <ul>
90
- <li><b>What is a good download speed?</b><br>A good download speed depends on various factors, such as your online activity, the number of devices and users on your network, and the quality of the content you are accessing. According to the FCC, a good download speed is at least 25 Mbps for most online activities. However, you may need a higher download speed if you want to stream HD video or play online games.</li>
91
- <li><b>How do I know if my download speed is slow?</b><br>You can check your download speed by using an online tool that can test your internet connection. You can simply type "internet speed test" into Google and click the "run speed test" button. This will show you how many Mbps your device is receiving from your ISP. You can also use other websites or apps that offer similar services, such as Speedtest.net or Fast.com.</li>
92
- <li><b>How do I fix slow download speed on Windows 10?</b><br>There are many ways to fix slow download speed on Windows 10, such as restarting your device, testing your internet speed, upgrading your internet plan, disabling other devices connected to your router, clearing your browser's cache and cookies, using a wired connection instead of Wi-Fi, scanning your device for malware and viruses, pausing or canceling other downloads and updates, changing your DNS server, and using a download manager or accelerator. You can also try some Windows-specific tips, such as updating your drivers, disabling background apps, adjusting your bandwidth settings, or using the Windows troubleshooter.</li>
93
- <li><b>How do I fix slow download speed on Mac?</b><br>There are many ways to fix slow download speed on Mac, such as restarting your device, testing your internet speed, upgrading your internet plan, disabling other devices connected to your router, clearing your browser's cache and cookies, using a wired connection instead of Wi-Fi, scanning your device for malware and viruses, pausing or canceling other downloads and updates, changing your DNS server, and using a download manager or accelerator. You can also try some Mac-specific tips, such as updating your software, deleting unused apps or files, optimizing your storage space, or using the Mac network utility.</li>
94
- <li><b>How do I fix slow download speed on Android?</b><br>There are many ways to fix slow download speed on Android, such as restarting your device, testing your internet speed, upgrading your internet plan, disabling other devices connected to your router, clearing your browser's cache and cookies, using a wired connection instead of Wi-Fi, scanning your device for malware and viruses, pausing or canceling other downloads and updates, changing your DNS server, and using a download manager or accelerator. You can also try some Android-specific tips, such as updating your apps, deleting unwanted apps or files, freeing up memory space, or using the Android data saver mode.</li>
95
- </ul></p> 401be4b1e0<br />
96
- <br />
97
- <br />
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download YouTube Premium APK Mod and Access Exclusive Content Background Play and More.md DELETED
@@ -1,107 +0,0 @@
1
- <br />
2
- <h1>How to Download YouTube Premium APK Mod for Android</h1>
3
- <p>YouTube is one of the most popular social media platforms for videos, where you can watch, share, like, comment, and upload your own content. However, if you want to enjoy some extra features and benefits, such as ad-free viewing, offline downloads, background play, and access to YouTube Music and YouTube Originals, you need to subscribe to YouTube Premium, which costs $11.99 per month.</p>
4
- <p>But what if you don't want to pay for YouTube Premium? Is there a way to get all the premium features for free? The answer is yes, with YouTube Premium APK Mod. In this article, we will show you what YouTube Premium APK Mod is, how to download and install it on your Android device, how to use it, and what are the pros and cons of using it. Let's get started!</p>
5
- <h2>download youtube premium apk mod</h2><br /><p><b><b>Download Zip</b> &#10022;&#10022;&#10022; <a href="https://jinyurl.com/2uNQx6">https://jinyurl.com/2uNQx6</a></b></p><br /><br />
6
- <h2>What is YouTube Premium APK Mod?</h2>
7
- <p>YouTube Premium APK Mod is a modified version of the official YouTube app that unlocks all the premium features without requiring a subscription. With YouTube Premium APK Mod, you can enjoy the following features and benefits:</p>
8
- <ul>
9
- <li><strong>Ad-free viewing:</strong> You can watch any video without any annoying ads interrupting your experience.</li>
10
- <li><strong>Offline downloads:</strong> You can download any video or playlist to your device and watch them anytime, anywhere, even without an internet connection.</li>
11
- <li><strong>Background play:</strong> You can play any video in the background while using other apps or when your screen is off.</li>
12
- <li><strong>YouTube Music:</strong> You can access YouTube Music, a music streaming service that lets you listen to millions of songs and playlists.</li>
13
- <li><strong>YouTube Originals:</strong> You can watch exclusive shows and movies produced by YouTube, such as Cobra Kai, Impulse, Liza on Demand, and more.</li>
14
- </ul>
15
- <p>YouTube Premium APK Mod is not available on the Google Play Store, so you need to download it from a third-party source. However, you need to be careful when downloading any modded app, as some of them may contain malware or viruses that can harm your device. We recommend you to download YouTube Premium APK Mod from a trusted source, such as [APKdone](^1^), which offers safe and secure downloads of various modded apps and games.</p>
16
- <h2>How to Download and Install YouTube Premium APK Mod?</h2>
17
- <p>To download and install YouTube Premium APK Mod on your Android device, follow these simple steps:</p>
18
- <p>download youtube premium apk mod latest version<br />
19
- download youtube premium apk mod no root<br />
20
- download youtube premium apk mod for android<br />
21
- download youtube premium apk mod free<br />
22
- download youtube premium apk mod 2023<br />
23
- download youtube premium apk mod with background play<br />
24
- download youtube premium apk mod offline<br />
25
- download youtube premium apk mod ad-free<br />
26
- download youtube premium apk mod unlocked<br />
27
- download youtube premium apk mod 18.23.35<br />
28
- download youtube premium apk mod apkdone[^1^]<br />
29
- download youtube premium apk mod rexdl<br />
30
- download youtube premium apk mod revdl<br />
31
- download youtube premium apk mod happymod<br />
32
- download youtube premium apk mod apkpure<br />
33
- download youtube premium apk mod android 11<br />
34
- download youtube premium apk mod android 10<br />
35
- download youtube premium apk mod android 9<br />
36
- download youtube premium apk mod android 8<br />
37
- download youtube premium apk mod android 7<br />
38
- download youtube premium apk mod for pc<br />
39
- download youtube premium apk mod for ios<br />
40
- download youtube premium apk mod for firestick<br />
41
- download youtube premium apk mod for smart tv<br />
42
- download youtube premium apk mod for windows 10<br />
43
- download youtube premium apk mod without ads<br />
44
- download youtube premium apk mod without subscription<br />
45
- download youtube premium apk mod without verification<br />
46
- download youtube premium apk mod without login<br />
47
- download youtube premium apk mod without payment<br />
48
- download youtube premium apk mod with music<br />
49
- download youtube premium apk mod with dark mode<br />
50
- download youtube premium apk mod with mini player<br />
51
- download youtube premium apk mod with picture in picture<br />
52
- download youtube premium apk mod with subtitles<br />
53
- download youtube premium apk mod pro<br />
54
- download youtube premium apk mod plus<br />
55
- download youtube premium apk mod vip<br />
56
- download youtube premium apk mod gold<br />
57
- download youtube premium apk mod black</p>
58
- <ol>
59
- <li>Go to [APKdone](^1^) and search for "YouTube MOD APK".</li>
60
- <li>Select the latest version of the app and click on the "Download APK" button.</li>
61
- <li>Wait for the download to finish and then open the downloaded file.</li>
62
- <li>If you see a warning message that says "Install blocked", go to your device settings and enable "Unknown sources" under security options.</li>
63
- <li>Tap on "Install" and wait for the installation to complete.</li>
64
- <li>Launch the app and sign in with your Google account or create a new one.</li>
65
- <li>Congratulations! You have successfully installed YouTube Premium APK Mod on your device. Enjoy!</li>
66
- </ol>
67
- <h2>How to Use YouTube Premium APK Mod?</h2>
68
- <p>To use YouTube Premium APK Mod, you just need to open the app and browse through the videos as usual. You will notice that there are no ads on any video, and you can also see a download button below each video. To download a video or playlist, just tap on the download button and choose the quality and format you want. You can also access YouTube Music and YouTube Originals from the app menu. To play a video in the background, just tap on the background play icon on the top right corner of the video player. You can also control the playback from the notification bar or the lock screen. To use YouTube Premium APK Mod, you don't need to root your device or install any other app.</p>
69
- <h2>Pros and Cons of YouTube Premium APK Mod</h2>
70
- <p>YouTube Premium APK Mod is a great way to enjoy all the premium features of YouTube for free, but it also has some drawbacks that you should be aware of. Here are some of the pros and cons of using YouTube Premium APK Mod:</p>
71
- <table>
72
- <tr>
73
- <th>Pros</th>
74
- <th>Cons</th>
75
- </tr>
76
- <tr>
77
- <td><ul>
78
- <li>You can watch any video without ads.</li>
79
- <li>You can download any video or playlist for offline viewing.</li>
80
- <li>You can play any video in the background.</li>
81
- <li>You can access YouTube Music and YouTube Originals.</li>
82
- <li>You don't need to pay for a subscription.</li>
83
- </ul></td>
84
- <td><ul>
85
- <li>You may face some compatibility issues with some devices or Android versions.</li>
86
- <li>You may not get the latest updates and features from the official YouTube app.</li>
87
- <li>You may violate the terms and conditions of YouTube and Google.</li>
88
- <li>You may risk your device's security and privacy by downloading from an untrusted source.</li>
89
- <li>You may not support the creators and artists who rely on YouTube revenue.</li>
90
- </ul></td>
91
- </tr>
92
- </table>
93
- <h2>Conclusion</h2>
94
- <p>YouTube Premium APK Mod is a modded version of the official YouTube app that allows you to enjoy all the premium features of YouTube for free, such as ad-free viewing, offline downloads, background play, YouTube Music, and YouTube Originals. However, it also has some disadvantages, such as compatibility issues, lack of updates, potential legal and security risks, and ethical concerns. Therefore, you should use YouTube Premium APK Mod at your own discretion and responsibility. We hope this article has helped you understand how to download and install YouTube Premium APK Mod on your Android device and how to use it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
95
- <h2>FAQs</h2>
96
- <h3>Is YouTube Premium APK Mod safe to use?</h3>
97
- <p>YouTube Premium APK Mod is generally safe to use if you download it from a trusted source, such as [APKdone]. However, you should always scan any downloaded file with an antivirus software before installing it on your device. You should also be careful about what permissions you grant to the app and what data you share with it.</p>
98
- <h3>Is YouTube Premium APK Mod legal to use?</h3>
99
- <p>YouTube Premium APK Mod is not legal to use, as it violates the terms and conditions of YouTube and Google. By using YouTube Premium APK Mod, you are bypassing the subscription fee that is required to access the premium features of YouTube. This may result in legal actions from YouTube or Google against you or the source of the modded app. Therefore, we do not endorse or encourage the use of YouTube Premium APK Mod.</p>
100
- <h3>Will YouTube Premium APK Mod work on iOS devices?</h3>
101
- <p>No, YouTube Premium APK Mod will not work on iOS devices, as it is only compatible with Android devices. If you want to enjoy the premium features of YouTube on your iOS device, you will need to subscribe to YouTube Premium or use other alternatives, such as third-party apps or websites that offer similar features.</p>
102
- <h3>How can I update YouTube Premium APK Mod?</h3>
103
- <p>To update YouTube Premium APK Mod, you will need to download and install the latest version of the app from the same source that you downloaded it from. You cannot update YouTube Premium APK Mod from the Google Play Store or the app itself, as it is not an official app. You should also check for updates regularly to ensure that you have the best performance and security of the app.</p>
104
- <h3>How can I uninstall YouTube Premium APK Mod?</h3>
105
- <p>To uninstall YouTube Premium APK Mod, you just need to follow the same steps that you would follow to uninstall any other app on your Android device. Go to your device settings and find the app manager or applications option. Then, find and select YouTube Premium APK Mod from the list of apps and tap on uninstall. You can also delete any downloaded videos or data from your device storage if you want to free up some space.</p> 197e85843d<br />
106
- <br />
107
- <br />
 
 
 
 
 
 
spaces/2ndelement/voicevox/test/test_synthesis_engine_base.py DELETED
@@ -1,411 +0,0 @@
1
- from typing import List, Union
2
- from unittest import TestCase
3
- from unittest.mock import Mock
4
-
5
- import numpy
6
-
7
- from voicevox_engine.model import AccentPhrase, AudioQuery, Mora
8
- from voicevox_engine.synthesis_engine import SynthesisEngine
9
-
10
-
11
- def yukarin_s_mock(length: int, phoneme_list: numpy.ndarray, speaker_id: numpy.ndarray):
12
- result = []
13
- # mockとしての適当な処理、特に意味はない
14
- for i in range(length):
15
- result.append(round(float(phoneme_list[i] * 0.0625 + speaker_id), 2))
16
- return numpy.array(result)
17
-
18
-
19
- def yukarin_sa_mock(
20
- length: int,
21
- vowel_phoneme_list: numpy.ndarray,
22
- consonant_phoneme_list: numpy.ndarray,
23
- start_accent_list: numpy.ndarray,
24
- end_accent_list: numpy.ndarray,
25
- start_accent_phrase_list: numpy.ndarray,
26
- end_accent_phrase_list: numpy.ndarray,
27
- speaker_id: numpy.ndarray,
28
- ):
29
- result = []
30
- # mockとしての適当な処理、特に意味はない
31
- for i in range(length):
32
- result.append(
33
- round(
34
- float(
35
- (
36
- vowel_phoneme_list[0][i]
37
- + consonant_phoneme_list[0][i]
38
- + start_accent_list[0][i]
39
- + end_accent_list[0][i]
40
- + start_accent_phrase_list[0][i]
41
- + end_accent_phrase_list[0][i]
42
- )
43
- * 0.0625
44
- + speaker_id
45
- ),
46
- 2,
47
- )
48
- )
49
- return numpy.array(result)[numpy.newaxis]
50
-
51
-
52
- def decode_mock(
53
- length: int,
54
- phoneme_size: int,
55
- f0: numpy.ndarray,
56
- phoneme: numpy.ndarray,
57
- speaker_id: Union[numpy.ndarray, int],
58
- ):
59
- result = []
60
- # mockとしての適当な処理、特に意味はない
61
- for i in range(length):
62
- # decode forwardはデータサイズがlengthの256倍になるのでとりあえず256回データをresultに入れる
63
- for _ in range(256):
64
- result.append(
65
- float(
66
- f0[i][0] * (numpy.where(phoneme[i] == 1)[0] / phoneme_size)
67
- + speaker_id
68
- )
69
- )
70
- return numpy.array(result)
71
-
72
-
73
- def koreha_arimasuka_base_expected():
74
- return [
75
- AccentPhrase(
76
- moras=[
77
- Mora(
78
- text="コ",
79
- consonant="k",
80
- consonant_length=2.44,
81
- vowel="o",
82
- vowel_length=2.88,
83
- pitch=4.38,
84
- ),
85
- Mora(
86
- text="レ",
87
- consonant="r",
88
- consonant_length=3.06,
89
- vowel="e",
90
- vowel_length=1.88,
91
- pitch=4.0,
92
- ),
93
- Mora(
94
- text="ワ",
95
- consonant="w",
96
- consonant_length=3.62,
97
- vowel="a",
98
- vowel_length=1.44,
99
- pitch=4.19,
100
- ),
101
- ],
102
- accent=3,
103
- pause_mora=None,
104
- is_interrogative=False,
105
- ),
106
- AccentPhrase(
107
- moras=[
108
- Mora(
109
- text="ア",
110
- consonant=None,
111
- consonant_length=None,
112
- vowel="a",
113
- vowel_length=1.44,
114
- pitch=1.44,
115
- ),
116
- Mora(
117
- text="リ",
118
- consonant="r",
119
- consonant_length=3.06,
120
- vowel="i",
121
- vowel_length=2.31,
122
- pitch=4.44,
123
- ),
124
- Mora(
125
- text="マ",
126
- consonant="m",
127
- consonant_length=2.62,
128
- vowel="a",
129
- vowel_length=1.44,
130
- pitch=3.12,
131
- ),
132
- Mora(
133
- text="ス",
134
- consonant="s",
135
- consonant_length=3.19,
136
- vowel="U",
137
- vowel_length=1.38,
138
- pitch=0.0,
139
- ),
140
- Mora(
141
- text="カ",
142
- consonant="k",
143
- consonant_length=2.44,
144
- vowel="a",
145
- vowel_length=1.44,
146
- pitch=2.94,
147
- ),
148
- ],
149
- accent=3,
150
- pause_mora=None,
151
- is_interrogative=False,
152
- ),
153
- ]
154
-
155
-
156
- def create_mock_query(accent_phrases):
157
- return AudioQuery(
158
- accent_phrases=accent_phrases,
159
- speedScale=1,
160
- pitchScale=0,
161
- intonationScale=1,
162
- volumeScale=1,
163
- prePhonemeLength=0.1,
164
- postPhonemeLength=0.1,
165
- outputSamplingRate=24000,
166
- outputStereo=False,
167
- kana="",
168
- )
169
-
170
-
171
- class MockCore:
172
- yukarin_s_forward = Mock(side_effect=yukarin_s_mock)
173
- yukarin_sa_forward = Mock(side_effect=yukarin_sa_mock)
174
- decode_forward = Mock(side_effect=decode_mock)
175
-
176
- def metas(self):
177
- return ""
178
-
179
- def supported_devices(self):
180
- return ""
181
-
182
- def is_model_loaded(self, speaker_id):
183
- return True
184
-
185
-
186
- class TestSynthesisEngineBase(TestCase):
187
- def setUp(self):
188
- super().setUp()
189
- self.synthesis_engine = SynthesisEngine(
190
- core=MockCore(),
191
- )
192
- self.synthesis_engine._synthesis_impl = Mock()
193
-
194
- def create_accent_phrases_test_base(self, text: str, expected: List[AccentPhrase]):
195
- actual = self.synthesis_engine.create_accent_phrases(text, 1)
196
- self.assertEqual(
197
- expected,
198
- actual,
199
- "case(text:" + text + ")",
200
- )
201
-
202
- def create_synthesis_test_base(
203
- self,
204
- text: str,
205
- expected: List[AccentPhrase],
206
- enable_interrogative_upspeak: bool,
207
- ):
208
- """音声合成時に疑問文モーラ処理を行っているかどうかを検証
209
- (https://github.com/VOICEVOX/voicevox_engine/issues/272#issuecomment-1022610866)
210
- """
211
- accent_phrases = self.synthesis_engine.create_accent_phrases(text, 1)
212
- query = create_mock_query(accent_phrases=accent_phrases)
213
- self.synthesis_engine.synthesis(
214
- query, 0, enable_interrogative_upspeak=enable_interrogative_upspeak
215
- )
216
- # _synthesis_implの第一引数に与えられたqueryを検証
217
- actual = self.synthesis_engine._synthesis_impl.call_args[0][0].accent_phrases
218
-
219
- self.assertEqual(
220
- expected,
221
- actual,
222
- "case(text:" + text + ")",
223
- )
224
-
225
- def test_create_accent_phrases(self):
226
- """accent_phrasesの作成時では疑問文モーラ処理を行わない
227
- (https://github.com/VOICEVOX/voicevox_engine/issues/272#issuecomment-1022610866)
228
- """
229
- expected = koreha_arimasuka_base_expected()
230
- expected[-1].is_interrogative = True
231
- self.create_accent_phrases_test_base(text="これはありますか?", expected=expected)
232
-
233
- def test_synthesis_interrogative(self):
234
- expected = koreha_arimasuka_base_expected()
235
- expected[-1].is_interrogative = True
236
- expected[-1].moras += [
237
- Mora(
238
- text="ア",
239
- consonant=None,
240
- consonant_length=None,
241
- vowel="a",
242
- vowel_length=0.15,
243
- pitch=expected[-1].moras[-1].pitch + 0.3,
244
- )
245
- ]
246
- self.create_synthesis_test_base(
247
- text="これはありますか?",
248
- expected=expected,
249
- enable_interrogative_upspeak=True,
250
- )
251
-
252
- expected = koreha_arimasuka_base_expected()
253
- expected[-1].is_interrogative = True
254
- self.create_synthesis_test_base(
255
- text="これはありますか?",
256
- expected=expected,
257
- enable_interrogative_upspeak=False,
258
- )
259
-
260
- expected = koreha_arimasuka_base_expected()
261
- self.create_synthesis_test_base(
262
- text="これはありますか",
263
- expected=expected,
264
- enable_interrogative_upspeak=True,
265
- )
266
-
267
- def nn_base_expected():
268
- return [
269
- AccentPhrase(
270
- moras=[
271
- Mora(
272
- text="ン",
273
- consonant=None,
274
- consonant_length=None,
275
- vowel="N",
276
- vowel_length=1.25,
277
- pitch=1.44,
278
- )
279
- ],
280
- accent=1,
281
- pause_mora=None,
282
- is_interrogative=False,
283
- )
284
- ]
285
-
286
- expected = nn_base_expected()
287
- self.create_synthesis_test_base(
288
- text="ん",
289
- expected=expected,
290
- enable_interrogative_upspeak=True,
291
- )
292
-
293
- expected = nn_base_expected()
294
- expected[-1].is_interrogative = True
295
- expected[-1].moras += [
296
- Mora(
297
- text="ン",
298
- consonant=None,
299
- consonant_length=None,
300
- vowel="N",
301
- vowel_length=0.15,
302
- pitch=expected[-1].moras[-1].pitch + 0.3,
303
- )
304
- ]
305
- self.create_synthesis_test_base(
306
- text="ん?",
307
- expected=expected,
308
- enable_interrogative_upspeak=True,
309
- )
310
-
311
- expected = nn_base_expected()
312
- expected[-1].is_interrogative = True
313
- self.create_synthesis_test_base(
314
- text="ん?",
315
- expected=expected,
316
- enable_interrogative_upspeak=False,
317
- )
318
-
319
- def ltu_base_expected():
320
- return [
321
- AccentPhrase(
322
- moras=[
323
- Mora(
324
- text="ッ",
325
- consonant=None,
326
- consonant_length=None,
327
- vowel="cl",
328
- vowel_length=1.69,
329
- pitch=0.0,
330
- )
331
- ],
332
- accent=1,
333
- pause_mora=None,
334
- is_interrogative=False,
335
- )
336
- ]
337
-
338
- expected = ltu_base_expected()
339
- self.create_synthesis_test_base(
340
- text="っ",
341
- expected=expected,
342
- enable_interrogative_upspeak=True,
343
- )
344
-
345
- expected = ltu_base_expected()
346
- expected[-1].is_interrogative = True
347
- self.create_synthesis_test_base(
348
- text="っ?",
349
- expected=expected,
350
- enable_interrogative_upspeak=True,
351
- )
352
-
353
- expected = ltu_base_expected()
354
- expected[-1].is_interrogative = True
355
- self.create_synthesis_test_base(
356
- text="っ?",
357
- expected=expected,
358
- enable_interrogative_upspeak=False,
359
- )
360
-
361
- def su_base_expected():
362
- return [
363
- AccentPhrase(
364
- moras=[
365
- Mora(
366
- text="ス",
367
- consonant="s",
368
- consonant_length=3.19,
369
- vowel="u",
370
- vowel_length=3.5,
371
- pitch=5.94,
372
- )
373
- ],
374
- accent=1,
375
- pause_mora=None,
376
- is_interrogative=False,
377
- )
378
- ]
379
-
380
- expected = su_base_expected()
381
- self.create_synthesis_test_base(
382
- text="す",
383
- expected=expected,
384
- enable_interrogative_upspeak=True,
385
- )
386
-
387
- expected = su_base_expected()
388
- expected[-1].is_interrogative = True
389
- expected[-1].moras += [
390
- Mora(
391
- text="ウ",
392
- consonant=None,
393
- consonant_length=None,
394
- vowel="u",
395
- vowel_length=0.15,
396
- pitch=expected[-1].moras[-1].pitch + 0.3,
397
- )
398
- ]
399
- self.create_synthesis_test_base(
400
- text="す?",
401
- expected=expected,
402
- enable_interrogative_upspeak=True,
403
- )
404
-
405
- expected = su_base_expected()
406
- expected[-1].is_interrogative = True
407
- self.create_synthesis_test_base(
408
- text="す?",
409
- expected=expected,
410
- enable_interrogative_upspeak=False,
411
- )
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/audio2pose_models/audio2pose.py DELETED
@@ -1,94 +0,0 @@
1
- import torch
2
- from torch import nn
3
- from src.audio2pose_models.cvae import CVAE
4
- from src.audio2pose_models.discriminator import PoseSequenceDiscriminator
5
- from src.audio2pose_models.audio_encoder import AudioEncoder
6
-
7
- class Audio2Pose(nn.Module):
8
- def __init__(self, cfg, wav2lip_checkpoint, device='cuda'):
9
- super().__init__()
10
- self.cfg = cfg
11
- self.seq_len = cfg.MODEL.CVAE.SEQ_LEN
12
- self.latent_dim = cfg.MODEL.CVAE.LATENT_SIZE
13
- self.device = device
14
-
15
- self.audio_encoder = AudioEncoder(wav2lip_checkpoint)
16
- self.audio_encoder.eval()
17
- for param in self.audio_encoder.parameters():
18
- param.requires_grad = False
19
-
20
- self.netG = CVAE(cfg)
21
- self.netD_motion = PoseSequenceDiscriminator(cfg)
22
-
23
- self.gan_criterion = nn.MSELoss()
24
- self.reg_criterion = nn.L1Loss(reduction='none')
25
- self.pair_criterion = nn.PairwiseDistance()
26
- self.cosine_loss = nn.CosineSimilarity(dim=1)
27
-
28
- def forward(self, x):
29
-
30
- batch = {}
31
- coeff_gt = x['gt'].cuda().squeeze(0) #bs frame_len+1 73
32
- batch['pose_motion_gt'] = coeff_gt[:, 1:, -9:-3] - coeff_gt[:, :1, -9:-3] #bs frame_len 6
33
- batch['ref'] = coeff_gt[:, 0, -9:-3] #bs 6
34
- batch['class'] = x['class'].squeeze(0).cuda() # bs
35
- indiv_mels= x['indiv_mels'].cuda().squeeze(0) # bs seq_len+1 80 16
36
-
37
- # forward
38
- audio_emb_list = []
39
- audio_emb = self.audio_encoder(indiv_mels[:, 1:, :, :].unsqueeze(2)) #bs seq_len 512
40
- batch['audio_emb'] = audio_emb
41
- batch = self.netG(batch)
42
-
43
- pose_motion_pred = batch['pose_motion_pred'] # bs frame_len 6
44
- pose_gt = coeff_gt[:, 1:, -9:-3].clone() # bs frame_len 6
45
- pose_pred = coeff_gt[:, :1, -9:-3] + pose_motion_pred # bs frame_len 6
46
-
47
- batch['pose_pred'] = pose_pred
48
- batch['pose_gt'] = pose_gt
49
-
50
- return batch
51
-
52
- def test(self, x):
53
-
54
- batch = {}
55
- ref = x['ref'] #bs 1 70
56
- batch['ref'] = x['ref'][:,0,-6:]
57
- batch['class'] = x['class']
58
- bs = ref.shape[0]
59
-
60
- indiv_mels= x['indiv_mels'] # bs T 1 80 16
61
- indiv_mels_use = indiv_mels[:, 1:] # we regard the ref as the first frame
62
- num_frames = x['num_frames']
63
- num_frames = int(num_frames) - 1
64
-
65
- #
66
- div = num_frames//self.seq_len
67
- re = num_frames%self.seq_len
68
- audio_emb_list = []
69
- pose_motion_pred_list = [torch.zeros(batch['ref'].unsqueeze(1).shape, dtype=batch['ref'].dtype,
70
- device=batch['ref'].device)]
71
-
72
- for i in range(div):
73
- z = torch.randn(bs, self.latent_dim).to(ref.device)
74
- batch['z'] = z
75
- audio_emb = self.audio_encoder(indiv_mels_use[:, i*self.seq_len:(i+1)*self.seq_len,:,:,:]) #bs seq_len 512
76
- batch['audio_emb'] = audio_emb
77
- batch = self.netG.test(batch)
78
- pose_motion_pred_list.append(batch['pose_motion_pred']) #list of bs seq_len 6
79
-
80
- if re != 0:
81
- z = torch.randn(bs, self.latent_dim).to(ref.device)
82
- batch['z'] = z
83
- audio_emb = self.audio_encoder(indiv_mels_use[:, -1*self.seq_len:,:,:,:]) #bs seq_len 512
84
- batch['audio_emb'] = audio_emb
85
- batch = self.netG.test(batch)
86
- pose_motion_pred_list.append(batch['pose_motion_pred'][:,-1*re:,:])
87
-
88
- pose_motion_pred = torch.cat(pose_motion_pred_list, dim = 1)
89
- batch['pose_motion_pred'] = pose_motion_pred
90
-
91
- pose_pred = ref[:, :1, -6:] + pose_motion_pred # bs T 6
92
-
93
- batch['pose_pred'] = pose_pred
94
- return batch
 
 
 
 
 
 
spaces/AIWaves/Debate/src/agents/Environment/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .base_environment import Environment
 
 
spaces/Ababababababbababa/poetry2023/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Poetry2023
3
- emoji: 👁
4
- colorFrom: green
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.16.0
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: Aaaaaaaabdualh/poetry2023
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/arcadetcrp.js DELETED
@@ -1,11 +0,0 @@
1
- import Recorder from './logic/runcommands/arcadetcrp/Recorder.js';
2
- import Player from './logic/runcommands/arcadetcrp/Player.js';
3
- import StepRunner from './logic/runcommands/arcadetcrp/StepRunner.js';
4
- import RunCommands from './logic/runcommands/RunCommands.js';
5
-
6
- export default {
7
- Recorder: Recorder,
8
- Player: Player,
9
- StepRunner: StepRunner,
10
- RunCommands: RunCommands
11
- };
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/mousewheeltoupdown.d.ts DELETED
@@ -1,2 +0,0 @@
1
- import MouseWheelToUpDown from './input/mousewheeltoupdown/MouseWheelToUpDown';
2
- export default MouseWheelToUpDown;
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/Build.js DELETED
@@ -1,40 +0,0 @@
1
- import LayoutMode0 from './LayoutMode0.js';
2
- import LayoutMode1 from './LayoutMode1.js';
3
- import LayoutMode2 from './LayoutMode2.js';
4
- import LayoutMode3 from './LayoutMode3.js';
5
-
6
- const GetValue = Phaser.Utils.Objects.GetValue;
7
- const LayoutCallbacks = [LayoutMode0, LayoutMode1, LayoutMode2, LayoutMode3];
8
-
9
- var Build = function (config) {
10
- this.clear(true);
11
-
12
- // Add Background
13
- var background = GetValue(config, 'background', undefined);
14
- if (background) {
15
- this.addBackground(background);
16
- }
17
-
18
- var layoutMode = GetValue(config, 'layoutMode', 0);
19
- if (typeof (layoutMode) === 'string') {
20
- layoutMode = LayoutModesMap[layoutMode.toUpperCase()];
21
- }
22
- var layoutCallback = LayoutCallbacks[layoutMode] || LayoutCallbacks[0];
23
- layoutCallback.call(this, config);
24
-
25
- this.addChildrenMap('background', config.background);
26
- this.addChildrenMap('header', config.header);
27
- this.addChildrenMap('leftSide', config.leftSide);
28
- this.addChildrenMap('content', config.content);
29
- this.addChildrenMap('rightSide', config.rightSide);
30
- this.addChildrenMap('footer', config.footer);
31
- }
32
-
33
- const LayoutModesMap = {
34
- 'FFF': 0,
35
- 'LFF': 1,
36
- 'FFR': 2,
37
- 'LFR': 3
38
- }
39
-
40
- export default Build;
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/Slider.js DELETED
@@ -1,194 +0,0 @@
1
- import Sizer from '../sizer/Sizer.js';
2
- import CreateBackground from '../utils/build/CreateBackground.js';
3
- import ProgressBase from '../../../plugins/utils/progressbase/ProgressBase.js';
4
- import OnDragThumb from './OnDragThumb.js';
5
- import OnTouchTrack from './OnTouchTrack.js';
6
- import GetStartPoint from './GetStartPoint.js';
7
- import GetEndPoint from './GetEndPoint.js';
8
- import UpdateThumb from './UpdateThumb.js';
9
- import UpdateIndicator from './UpdateIndicator.js';
10
-
11
- const GetValue = Phaser.Utils.Objects.GetValue;
12
- const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
13
- const Clamp = Phaser.Math.Clamp;
14
- const SnapTo = Phaser.Math.Snap.To;
15
-
16
- class Slider extends ProgressBase(Sizer) {
17
- constructor(scene, config) {
18
- // Create sizer
19
- super(scene, config);
20
- this.type = 'rexSlider';
21
-
22
- this.bootProgressBase(config);
23
-
24
- this.reverseAxis = GetValue(config, 'reverseAxis', false);
25
-
26
- // Add elements
27
- var background = GetValue(config, 'background', undefined);
28
- var track = GetValue(config, 'track', undefined);
29
- var indicator = GetValue(config, 'indicator', undefined);
30
- var thumb = GetValue(config, 'thumb', undefined);
31
-
32
- if (background) {
33
- if (IsPlainObject(background)) {
34
- background = CreateBackground(scene, background);
35
- }
36
- this.addBackground(background);
37
- }
38
-
39
- if (track) {
40
- if (IsPlainObject(track)) {
41
- track = CreateBackground(scene, track);
42
- }
43
- this.add(track,
44
- {
45
- proportion: 1,
46
- expand: true,
47
- minWidth: ((this.orientation === 0) ? 0 : undefined),
48
- minHeight: ((this.orientation === 1) ? 0 : undefined)
49
- }
50
- )
51
- }
52
-
53
- if (indicator) {
54
- if (IsPlainObject(indicator)) {
55
- indicator = CreateBackground(scene, indicator);
56
- }
57
- this.pin(indicator); // Put into container but not layout it
58
- }
59
-
60
- if (thumb) {
61
- if (IsPlainObject(thumb)) {
62
- thumb = CreateBackground(scene, thumb);
63
- }
64
- this.pin(thumb); // Put into container but not layout it
65
-
66
- }
67
-
68
- // Input
69
- var inputMode = GetValue(config, 'input', 0);
70
- if (typeof (inputMode) === 'string') {
71
- inputMode = INPUTMODE[inputMode];
72
- }
73
- switch (inputMode) {
74
- case 0: // 'drag'
75
- if (thumb) {
76
- thumb.setInteractive();
77
- this.scene.input.setDraggable(thumb);
78
- thumb
79
- .on('drag', OnDragThumb, this)
80
- .on('dragstart', function (pointer) {
81
- this.eventEmitter.emit('inputstart', pointer);
82
- }, this)
83
- .on('dragend', function (pointer) {
84
- this.eventEmitter.emit('inputend', pointer);
85
- }, this)
86
-
87
- }
88
- break;
89
- case 1: // 'click'
90
- this
91
- .on('pointerdown', OnTouchTrack, this)
92
- .on('pointermove', OnTouchTrack, this)
93
- .on('pointerdown', function (pointer) {
94
- this.eventEmitter.emit('inputstart', pointer);
95
- }, this)
96
- .on('pointerup', function (pointer) {
97
- this.eventEmitter.emit('inputend', pointer);
98
- }, this)
99
- .on('pointerover', function (pointer) {
100
- if (pointer.isDown) {
101
- this.eventEmitter.emit('inputstart', pointer);
102
- }
103
- }, this)
104
- .on('pointerout', function (pointer) {
105
- if (pointer.isDown) {
106
- this.eventEmitter.emit('inputend', pointer);
107
- }
108
- }, this)
109
- .setInteractive()
110
-
111
- break;
112
- }
113
-
114
- this.addChildrenMap('background', background);
115
- this.addChildrenMap('track', track);
116
- this.addChildrenMap('indicator', indicator);
117
- this.addChildrenMap('thumb', thumb);
118
-
119
- this.setEnable(GetValue(config, 'enable', undefined));
120
- this.setGap(GetValue(config, 'gap', undefined));
121
- this.setValue(GetValue(config, 'value', 0), GetValue(config, 'min', undefined), GetValue(config, 'max', undefined));
122
-
123
- }
124
-
125
- setEnable(enable) {
126
- if (enable === undefined) {
127
- enable = true;
128
- }
129
- this.enable = enable;
130
- return this;
131
- }
132
-
133
- setGap(gap, min, max) {
134
- if (gap && (min !== undefined)) {
135
- gap = gap / (max - min);
136
- }
137
-
138
- this.gap = gap;
139
- return this;
140
- }
141
-
142
- // Override
143
- get value() {
144
- return this._value;
145
- }
146
-
147
- // Override
148
- set value(value) {
149
- if (this.gap !== undefined) {
150
- value = SnapTo(value, this.gap);
151
- }
152
- var oldValue = this._value;
153
- this._value = Clamp(value, 0, 1);
154
-
155
- if (oldValue !== this._value) {
156
- this.updateThumb(this._value);
157
- this.updateIndicator(this._value);
158
- this.eventEmitter.emit('valuechange', this._value, oldValue, this.eventEmitter);
159
- }
160
- }
161
-
162
- runLayout(parent, newWidth, newHeight) {
163
- // Skip hidden or !dirty sizer
164
- if (this.ignoreLayout) {
165
- return this;
166
- }
167
-
168
- super.runLayout(parent, newWidth, newHeight);
169
- this.updateThumb();
170
- this.updateIndicator();
171
- return this;
172
- }
173
- }
174
-
175
- const INPUTMODE = {
176
- pan: 0,
177
- drag: 0,
178
- click: 1,
179
- none: -1,
180
- }
181
-
182
- var methods = {
183
- getStartPoint: GetStartPoint,
184
- getEndPoint: GetEndPoint,
185
- updateThumb: UpdateThumb,
186
- updateIndicator: UpdateIndicator,
187
- }
188
-
189
- Object.assign(
190
- Slider.prototype,
191
- methods,
192
- );
193
-
194
- export default Slider;
 
 
 
 
 
 
spaces/AlekseyKorshuk/instagram-filter-removal/README.md DELETED
@@ -1,37 +0,0 @@
1
- ---
2
- title: Instagram Filter Removal
3
- emoji: 👀
4
- colorFrom: gray
5
- colorTo: green
6
- sdk: gradio
7
- app_file: app.py
8
- pinned: false
9
- ---
10
-
11
- # Configuration
12
-
13
- `title`: _string_
14
- Display title for the Space
15
-
16
- `emoji`: _string_
17
- Space emoji (emoji-only character allowed)
18
-
19
- `colorFrom`: _string_
20
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
21
-
22
- `colorTo`: _string_
23
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
24
-
25
- `sdk`: _string_
26
- Can be either `gradio` or `streamlit`
27
-
28
- `sdk_version` : _string_
29
- Only applicable for `streamlit` SDK.
30
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
31
-
32
- `app_file`: _string_
33
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
34
- Path is relative to the root of the repository.
35
-
36
- `pinned`: _boolean_
37
- Whether the Space stays on top of your list.
 
 
 
 
 
spaces/AlexWang/lama/bin/extract_masks.py DELETED
@@ -1,63 +0,0 @@
1
- import PIL.Image as Image
2
- import numpy as np
3
- import os
4
-
5
-
6
- def main(args):
7
- if not args.indir.endswith('/'):
8
- args.indir += '/'
9
- os.makedirs(args.outdir, exist_ok=True)
10
-
11
- src_images = [
12
- args.indir+fname for fname in os.listdir(args.indir)]
13
-
14
- tgt_masks = [
15
- args.outdir+fname[:-4] + f'_mask000.png'
16
- for fname in os.listdir(args.indir)]
17
-
18
- for img_name, msk_name in zip(src_images, tgt_masks):
19
- #print(img)
20
- #print(msk)
21
-
22
- image = Image.open(img_name).convert('RGB')
23
- image = np.transpose(np.array(image), (2, 0, 1))
24
-
25
- mask = (image == 255).astype(int)
26
-
27
- print(mask.dtype, mask.shape)
28
-
29
-
30
- Image.fromarray(
31
- np.clip(mask[0,:,:] * 255, 0, 255).astype('uint8'),mode='L'
32
- ).save(msk_name)
33
-
34
-
35
-
36
-
37
- '''
38
- for infile in src_images:
39
- try:
40
- file_relpath = infile[len(indir):]
41
- img_outpath = os.path.join(outdir, file_relpath)
42
- os.makedirs(os.path.dirname(img_outpath), exist_ok=True)
43
-
44
- image = Image.open(infile).convert('RGB')
45
-
46
- mask =
47
-
48
- Image.fromarray(
49
- np.clip(
50
- cur_mask * 255, 0, 255).astype('uint8'),
51
- mode='L'
52
- ).save(cur_basename + f'_mask{i:03d}.png')
53
- '''
54
-
55
-
56
-
57
- if __name__ == '__main__':
58
- import argparse
59
- aparser = argparse.ArgumentParser()
60
- aparser.add_argument('--indir', type=str, help='Path to folder with images')
61
- aparser.add_argument('--outdir', type=str, help='Path to folder to store aligned images and masks to')
62
-
63
- main(aparser.parse_args())
 
 
 
 
 
spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/cleaners.py DELETED
@@ -1,146 +0,0 @@
1
- import re
2
-
3
-
4
- def japanese_cleaners(text):
5
- from text.japanese import japanese_to_romaji_with_accent
6
- text = japanese_to_romaji_with_accent(text)
7
- text = re.sub(r'([A-Za-z])$', r'\1.', text)
8
- return text
9
-
10
-
11
- def japanese_cleaners2(text):
12
- return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
13
-
14
-
15
- def korean_cleaners(text):
16
- '''Pipeline for Korean text'''
17
- from text.korean import latin_to_hangul, number_to_hangul, divide_hangul
18
- text = latin_to_hangul(text)
19
- text = number_to_hangul(text)
20
- text = divide_hangul(text)
21
- text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
22
- return text
23
-
24
-
25
- def chinese_cleaners(text):
26
- '''Pipeline for Chinese text'''
27
- from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo
28
- text = number_to_chinese(text)
29
- text = chinese_to_bopomofo(text)
30
- text = latin_to_bopomofo(text)
31
- text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
32
- return text
33
-
34
-
35
- def zh_ja_mixture_cleaners(text):
36
- from text.mandarin import chinese_to_romaji
37
- from text.japanese import japanese_to_romaji_with_accent
38
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
39
- lambda x: chinese_to_romaji(x.group(1))+' ', text)
40
- text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
41
- x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')+' ', text)
42
- text = re.sub(r'\s+$', '', text)
43
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
44
- return text
45
-
46
-
47
- def sanskrit_cleaners(text):
48
- text = text.replace('॥', '।').replace('ॐ', 'ओम्')
49
- if text[-1] != '।':
50
- text += ' ।'
51
- return text
52
-
53
-
54
- def cjks_cleaners(text):
55
- from text.mandarin import chinese_to_lazy_ipa
56
- from text.japanese import japanese_to_ipa
57
- from text.korean import korean_to_lazy_ipa
58
- from text.sanskrit import devanagari_to_ipa
59
- from text.english import english_to_lazy_ipa
60
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
61
- lambda x: chinese_to_lazy_ipa(x.group(1))+' ', text)
62
- text = re.sub(r'\[JA\](.*?)\[JA\]',
63
- lambda x: japanese_to_ipa(x.group(1))+' ', text)
64
- text = re.sub(r'\[KO\](.*?)\[KO\]',
65
- lambda x: korean_to_lazy_ipa(x.group(1))+' ', text)
66
- text = re.sub(r'\[SA\](.*?)\[SA\]',
67
- lambda x: devanagari_to_ipa(x.group(1))+' ', text)
68
- text = re.sub(r'\[EN\](.*?)\[EN\]',
69
- lambda x: english_to_lazy_ipa(x.group(1))+' ', text)
70
- text = re.sub(r'\s+$', '', text)
71
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
72
- return text
73
-
74
-
75
- def cjke_cleaners(text):
76
- from text.mandarin import chinese_to_lazy_ipa
77
- from text.japanese import japanese_to_ipa
78
- from text.korean import korean_to_ipa
79
- from text.english import english_to_ipa2
80
- text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
81
- 'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')+' ', text)
82
- text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
83
- 'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz')+' ', text)
84
- text = re.sub(r'\[KO\](.*?)\[KO\]',
85
- lambda x: korean_to_ipa(x.group(1))+' ', text)
86
- text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
87
- 'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u')+' ', text)
88
- text = re.sub(r'\s+$', '', text)
89
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
90
- return text
91
-
92
-
93
- def cjke_cleaners2(text):
94
- from text.mandarin import chinese_to_ipa
95
- from text.japanese import japanese_to_ipa2
96
- from text.korean import korean_to_ipa
97
- from text.english import english_to_ipa2
98
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
99
- lambda x: chinese_to_ipa(x.group(1))+' ', text)
100
- text = re.sub(r'\[JA\](.*?)\[JA\]',
101
- lambda x: japanese_to_ipa2(x.group(1))+' ', text)
102
- text = re.sub(r'\[KO\](.*?)\[KO\]',
103
- lambda x: korean_to_ipa(x.group(1))+' ', text)
104
- text = re.sub(r'\[EN\](.*?)\[EN\]',
105
- lambda x: english_to_ipa2(x.group(1))+' ', text)
106
- text = re.sub(r'\s+$', '', text)
107
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
108
- return text
109
-
110
-
111
- def thai_cleaners(text):
112
- from text.thai import num_to_thai, latin_to_thai
113
- text = num_to_thai(text)
114
- text = latin_to_thai(text)
115
- return text
116
-
117
-
118
- def shanghainese_cleaners(text):
119
- from text.shanghainese import shanghainese_to_ipa
120
- text = shanghainese_to_ipa(text)
121
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
122
- return text
123
-
124
-
125
- def chinese_dialect_cleaners(text):
126
- from text.mandarin import chinese_to_ipa2
127
- from text.japanese import japanese_to_ipa3
128
- from text.shanghainese import shanghainese_to_ipa
129
- from text.cantonese import cantonese_to_ipa
130
- from text.english import english_to_lazy_ipa2
131
- from text.ngu_dialect import ngu_dialect_to_ipa
132
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
133
- lambda x: chinese_to_ipa2(x.group(1))+' ', text)
134
- text = re.sub(r'\[JA\](.*?)\[JA\]',
135
- lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text)
136
- text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
137
- '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text)
138
- text = re.sub(r'\[GD\](.*?)\[GD\]',
139
- lambda x: cantonese_to_ipa(x.group(1))+' ', text)
140
- text = re.sub(r'\[EN\](.*?)\[EN\]',
141
- lambda x: english_to_lazy_ipa2(x.group(1))+' ', text)
142
- text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
143
- 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text)
144
- text = re.sub(r'\s+$', '', text)
145
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
146
- return text
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/controlnet/train_controlnet.py DELETED
@@ -1,1127 +0,0 @@
1
- #!/usr/bin/env python
2
- # coding=utf-8
3
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
-
16
- import argparse
17
- import logging
18
- import math
19
- import os
20
- import random
21
- import shutil
22
- from pathlib import Path
23
-
24
- import accelerate
25
- import numpy as np
26
- import torch
27
- import torch.nn.functional as F
28
- import torch.utils.checkpoint
29
- import transformers
30
- from accelerate import Accelerator
31
- from accelerate.logging import get_logger
32
- from accelerate.utils import ProjectConfiguration, set_seed
33
- from datasets import load_dataset
34
- from huggingface_hub import create_repo, upload_folder
35
- from packaging import version
36
- from PIL import Image
37
- from torchvision import transforms
38
- from tqdm.auto import tqdm
39
- from transformers import AutoTokenizer, PretrainedConfig
40
-
41
- import diffusers
42
- from diffusers import (
43
- AutoencoderKL,
44
- ControlNetModel,
45
- DDPMScheduler,
46
- StableDiffusionControlNetPipeline,
47
- UNet2DConditionModel,
48
- UniPCMultistepScheduler,
49
- )
50
- from diffusers.optimization import get_scheduler
51
- from diffusers.utils import check_min_version, is_wandb_available
52
- from diffusers.utils.import_utils import is_xformers_available
53
-
54
-
55
- if is_wandb_available():
56
- import wandb
57
-
58
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
59
- check_min_version("0.19.0")
60
-
61
- logger = get_logger(__name__)
62
-
63
-
64
- def image_grid(imgs, rows, cols):
65
- assert len(imgs) == rows * cols
66
-
67
- w, h = imgs[0].size
68
- grid = Image.new("RGB", size=(cols * w, rows * h))
69
-
70
- for i, img in enumerate(imgs):
71
- grid.paste(img, box=(i % cols * w, i // cols * h))
72
- return grid
73
-
74
-
75
- def log_validation(vae, text_encoder, tokenizer, unet, controlnet, args, accelerator, weight_dtype, step):
76
- logger.info("Running validation... ")
77
-
78
- controlnet = accelerator.unwrap_model(controlnet)
79
-
80
- pipeline = StableDiffusionControlNetPipeline.from_pretrained(
81
- args.pretrained_model_name_or_path,
82
- vae=vae,
83
- text_encoder=text_encoder,
84
- tokenizer=tokenizer,
85
- unet=unet,
86
- controlnet=controlnet,
87
- safety_checker=None,
88
- revision=args.revision,
89
- torch_dtype=weight_dtype,
90
- )
91
- pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
92
- pipeline = pipeline.to(accelerator.device)
93
- pipeline.set_progress_bar_config(disable=True)
94
-
95
- if args.enable_xformers_memory_efficient_attention:
96
- pipeline.enable_xformers_memory_efficient_attention()
97
-
98
- if args.seed is None:
99
- generator = None
100
- else:
101
- generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
102
-
103
- if len(args.validation_image) == len(args.validation_prompt):
104
- validation_images = args.validation_image
105
- validation_prompts = args.validation_prompt
106
- elif len(args.validation_image) == 1:
107
- validation_images = args.validation_image * len(args.validation_prompt)
108
- validation_prompts = args.validation_prompt
109
- elif len(args.validation_prompt) == 1:
110
- validation_images = args.validation_image
111
- validation_prompts = args.validation_prompt * len(args.validation_image)
112
- else:
113
- raise ValueError(
114
- "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`"
115
- )
116
-
117
- image_logs = []
118
-
119
- for validation_prompt, validation_image in zip(validation_prompts, validation_images):
120
- validation_image = Image.open(validation_image).convert("RGB")
121
-
122
- images = []
123
-
124
- for _ in range(args.num_validation_images):
125
- with torch.autocast("cuda"):
126
- image = pipeline(
127
- validation_prompt, validation_image, num_inference_steps=20, generator=generator
128
- ).images[0]
129
-
130
- images.append(image)
131
-
132
- image_logs.append(
133
- {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt}
134
- )
135
-
136
- for tracker in accelerator.trackers:
137
- if tracker.name == "tensorboard":
138
- for log in image_logs:
139
- images = log["images"]
140
- validation_prompt = log["validation_prompt"]
141
- validation_image = log["validation_image"]
142
-
143
- formatted_images = []
144
-
145
- formatted_images.append(np.asarray(validation_image))
146
-
147
- for image in images:
148
- formatted_images.append(np.asarray(image))
149
-
150
- formatted_images = np.stack(formatted_images)
151
-
152
- tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC")
153
- elif tracker.name == "wandb":
154
- formatted_images = []
155
-
156
- for log in image_logs:
157
- images = log["images"]
158
- validation_prompt = log["validation_prompt"]
159
- validation_image = log["validation_image"]
160
-
161
- formatted_images.append(wandb.Image(validation_image, caption="Controlnet conditioning"))
162
-
163
- for image in images:
164
- image = wandb.Image(image, caption=validation_prompt)
165
- formatted_images.append(image)
166
-
167
- tracker.log({"validation": formatted_images})
168
- else:
169
- logger.warn(f"image logging not implemented for {tracker.name}")
170
-
171
- return image_logs
172
-
173
-
174
- def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
175
- text_encoder_config = PretrainedConfig.from_pretrained(
176
- pretrained_model_name_or_path,
177
- subfolder="text_encoder",
178
- revision=revision,
179
- )
180
- model_class = text_encoder_config.architectures[0]
181
-
182
- if model_class == "CLIPTextModel":
183
- from transformers import CLIPTextModel
184
-
185
- return CLIPTextModel
186
- elif model_class == "RobertaSeriesModelWithTransformation":
187
- from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
188
-
189
- return RobertaSeriesModelWithTransformation
190
- else:
191
- raise ValueError(f"{model_class} is not supported.")
192
-
193
-
194
- def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=None):
195
- img_str = ""
196
- if image_logs is not None:
197
- img_str = "You can find some example images below.\n"
198
- for i, log in enumerate(image_logs):
199
- images = log["images"]
200
- validation_prompt = log["validation_prompt"]
201
- validation_image = log["validation_image"]
202
- validation_image.save(os.path.join(repo_folder, "image_control.png"))
203
- img_str += f"prompt: {validation_prompt}\n"
204
- images = [validation_image] + images
205
- image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png"))
206
- img_str += f"![images_{i})](./images_{i}.png)\n"
207
-
208
- yaml = f"""
209
- ---
210
- license: creativeml-openrail-m
211
- base_model: {base_model}
212
- tags:
213
- - stable-diffusion
214
- - stable-diffusion-diffusers
215
- - text-to-image
216
- - diffusers
217
- - controlnet
218
- inference: true
219
- ---
220
- """
221
- model_card = f"""
222
- # controlnet-{repo_id}
223
-
224
- These are controlnet weights trained on {base_model} with new type of conditioning.
225
- {img_str}
226
- """
227
- with open(os.path.join(repo_folder, "README.md"), "w") as f:
228
- f.write(yaml + model_card)
229
-
230
-
231
- def parse_args(input_args=None):
232
- parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.")
233
- parser.add_argument(
234
- "--pretrained_model_name_or_path",
235
- type=str,
236
- default=None,
237
- required=True,
238
- help="Path to pretrained model or model identifier from huggingface.co/models.",
239
- )
240
- parser.add_argument(
241
- "--controlnet_model_name_or_path",
242
- type=str,
243
- default=None,
244
- help="Path to pretrained controlnet model or model identifier from huggingface.co/models."
245
- " If not specified controlnet weights are initialized from unet.",
246
- )
247
- parser.add_argument(
248
- "--revision",
249
- type=str,
250
- default=None,
251
- required=False,
252
- help=(
253
- "Revision of pretrained model identifier from huggingface.co/models. Trainable model components should be"
254
- " float32 precision."
255
- ),
256
- )
257
- parser.add_argument(
258
- "--tokenizer_name",
259
- type=str,
260
- default=None,
261
- help="Pretrained tokenizer name or path if not the same as model_name",
262
- )
263
- parser.add_argument(
264
- "--output_dir",
265
- type=str,
266
- default="controlnet-model",
267
- help="The output directory where the model predictions and checkpoints will be written.",
268
- )
269
- parser.add_argument(
270
- "--cache_dir",
271
- type=str,
272
- default=None,
273
- help="The directory where the downloaded models and datasets will be stored.",
274
- )
275
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
276
- parser.add_argument(
277
- "--resolution",
278
- type=int,
279
- default=512,
280
- help=(
281
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
282
- " resolution"
283
- ),
284
- )
285
- parser.add_argument(
286
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
287
- )
288
- parser.add_argument("--num_train_epochs", type=int, default=1)
289
- parser.add_argument(
290
- "--max_train_steps",
291
- type=int,
292
- default=None,
293
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
294
- )
295
- parser.add_argument(
296
- "--checkpointing_steps",
297
- type=int,
298
- default=500,
299
- help=(
300
- "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
301
- "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
302
- "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
303
- "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step"
304
- "instructions."
305
- ),
306
- )
307
- parser.add_argument(
308
- "--checkpoints_total_limit",
309
- type=int,
310
- default=None,
311
- help=("Max number of checkpoints to store."),
312
- )
313
- parser.add_argument(
314
- "--resume_from_checkpoint",
315
- type=str,
316
- default=None,
317
- help=(
318
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
319
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
320
- ),
321
- )
322
- parser.add_argument(
323
- "--gradient_accumulation_steps",
324
- type=int,
325
- default=1,
326
- help="Number of updates steps to accumulate before performing a backward/update pass.",
327
- )
328
- parser.add_argument(
329
- "--gradient_checkpointing",
330
- action="store_true",
331
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
332
- )
333
- parser.add_argument(
334
- "--learning_rate",
335
- type=float,
336
- default=5e-6,
337
- help="Initial learning rate (after the potential warmup period) to use.",
338
- )
339
- parser.add_argument(
340
- "--scale_lr",
341
- action="store_true",
342
- default=False,
343
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
344
- )
345
- parser.add_argument(
346
- "--lr_scheduler",
347
- type=str,
348
- default="constant",
349
- help=(
350
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
351
- ' "constant", "constant_with_warmup"]'
352
- ),
353
- )
354
- parser.add_argument(
355
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
356
- )
357
- parser.add_argument(
358
- "--lr_num_cycles",
359
- type=int,
360
- default=1,
361
- help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
362
- )
363
- parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
364
- parser.add_argument(
365
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
366
- )
367
- parser.add_argument(
368
- "--dataloader_num_workers",
369
- type=int,
370
- default=0,
371
- help=(
372
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
373
- ),
374
- )
375
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
376
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
377
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
378
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
379
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
380
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
381
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
382
- parser.add_argument(
383
- "--hub_model_id",
384
- type=str,
385
- default=None,
386
- help="The name of the repository to keep in sync with the local `output_dir`.",
387
- )
388
- parser.add_argument(
389
- "--logging_dir",
390
- type=str,
391
- default="logs",
392
- help=(
393
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
394
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
395
- ),
396
- )
397
- parser.add_argument(
398
- "--allow_tf32",
399
- action="store_true",
400
- help=(
401
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
402
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
403
- ),
404
- )
405
- parser.add_argument(
406
- "--report_to",
407
- type=str,
408
- default="tensorboard",
409
- help=(
410
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
411
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
412
- ),
413
- )
414
- parser.add_argument(
415
- "--mixed_precision",
416
- type=str,
417
- default=None,
418
- choices=["no", "fp16", "bf16"],
419
- help=(
420
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
421
- " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
422
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
423
- ),
424
- )
425
- parser.add_argument(
426
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
427
- )
428
- parser.add_argument(
429
- "--set_grads_to_none",
430
- action="store_true",
431
- help=(
432
- "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain"
433
- " behaviors, so disable this argument if it causes any problems. More info:"
434
- " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
435
- ),
436
- )
437
- parser.add_argument(
438
- "--dataset_name",
439
- type=str,
440
- default=None,
441
- help=(
442
- "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
443
- " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
444
- " or to a folder containing files that 🤗 Datasets can understand."
445
- ),
446
- )
447
- parser.add_argument(
448
- "--dataset_config_name",
449
- type=str,
450
- default=None,
451
- help="The config of the Dataset, leave as None if there's only one config.",
452
- )
453
- parser.add_argument(
454
- "--train_data_dir",
455
- type=str,
456
- default=None,
457
- help=(
458
- "A folder containing the training data. Folder contents must follow the structure described in"
459
- " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
460
- " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
461
- ),
462
- )
463
- parser.add_argument(
464
- "--image_column", type=str, default="image", help="The column of the dataset containing the target image."
465
- )
466
- parser.add_argument(
467
- "--conditioning_image_column",
468
- type=str,
469
- default="conditioning_image",
470
- help="The column of the dataset containing the controlnet conditioning image.",
471
- )
472
- parser.add_argument(
473
- "--caption_column",
474
- type=str,
475
- default="text",
476
- help="The column of the dataset containing a caption or a list of captions.",
477
- )
478
- parser.add_argument(
479
- "--max_train_samples",
480
- type=int,
481
- default=None,
482
- help=(
483
- "For debugging purposes or quicker training, truncate the number of training examples to this "
484
- "value if set."
485
- ),
486
- )
487
- parser.add_argument(
488
- "--proportion_empty_prompts",
489
- type=float,
490
- default=0,
491
- help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).",
492
- )
493
- parser.add_argument(
494
- "--validation_prompt",
495
- type=str,
496
- default=None,
497
- nargs="+",
498
- help=(
499
- "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`."
500
- " Provide either a matching number of `--validation_image`s, a single `--validation_image`"
501
- " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s."
502
- ),
503
- )
504
- parser.add_argument(
505
- "--validation_image",
506
- type=str,
507
- default=None,
508
- nargs="+",
509
- help=(
510
- "A set of paths to the controlnet conditioning image be evaluated every `--validation_steps`"
511
- " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a"
512
- " a single `--validation_prompt` to be used with all `--validation_image`s, or a single"
513
- " `--validation_image` that will be used with all `--validation_prompt`s."
514
- ),
515
- )
516
- parser.add_argument(
517
- "--num_validation_images",
518
- type=int,
519
- default=4,
520
- help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair",
521
- )
522
- parser.add_argument(
523
- "--validation_steps",
524
- type=int,
525
- default=100,
526
- help=(
527
- "Run validation every X steps. Validation consists of running the prompt"
528
- " `args.validation_prompt` multiple times: `args.num_validation_images`"
529
- " and logging the images."
530
- ),
531
- )
532
- parser.add_argument(
533
- "--tracker_project_name",
534
- type=str,
535
- default="train_controlnet",
536
- help=(
537
- "The `project_name` argument passed to Accelerator.init_trackers for"
538
- " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
539
- ),
540
- )
541
-
542
- if input_args is not None:
543
- args = parser.parse_args(input_args)
544
- else:
545
- args = parser.parse_args()
546
-
547
- if args.dataset_name is None and args.train_data_dir is None:
548
- raise ValueError("Specify either `--dataset_name` or `--train_data_dir`")
549
-
550
- if args.dataset_name is not None and args.train_data_dir is not None:
551
- raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`")
552
-
553
- if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1:
554
- raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].")
555
-
556
- if args.validation_prompt is not None and args.validation_image is None:
557
- raise ValueError("`--validation_image` must be set if `--validation_prompt` is set")
558
-
559
- if args.validation_prompt is None and args.validation_image is not None:
560
- raise ValueError("`--validation_prompt` must be set if `--validation_image` is set")
561
-
562
- if (
563
- args.validation_image is not None
564
- and args.validation_prompt is not None
565
- and len(args.validation_image) != 1
566
- and len(args.validation_prompt) != 1
567
- and len(args.validation_image) != len(args.validation_prompt)
568
- ):
569
- raise ValueError(
570
- "Must provide either 1 `--validation_image`, 1 `--validation_prompt`,"
571
- " or the same number of `--validation_prompt`s and `--validation_image`s"
572
- )
573
-
574
- if args.resolution % 8 != 0:
575
- raise ValueError(
576
- "`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the controlnet encoder."
577
- )
578
-
579
- return args
580
-
581
-
582
- def make_train_dataset(args, tokenizer, accelerator):
583
- # Get the datasets: you can either provide your own training and evaluation files (see below)
584
- # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
585
-
586
- # In distributed training, the load_dataset function guarantees that only one local process can concurrently
587
- # download the dataset.
588
- if args.dataset_name is not None:
589
- # Downloading and loading a dataset from the hub.
590
- dataset = load_dataset(
591
- args.dataset_name,
592
- args.dataset_config_name,
593
- cache_dir=args.cache_dir,
594
- )
595
- else:
596
- if args.train_data_dir is not None:
597
- dataset = load_dataset(
598
- args.train_data_dir,
599
- cache_dir=args.cache_dir,
600
- )
601
- # See more about loading custom images at
602
- # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
603
-
604
- # Preprocessing the datasets.
605
- # We need to tokenize inputs and targets.
606
- column_names = dataset["train"].column_names
607
-
608
- # Get the column names for input/target.
609
- if args.image_column is None:
610
- image_column = column_names[0]
611
- logger.info(f"image column defaulting to {image_column}")
612
- else:
613
- image_column = args.image_column
614
- if image_column not in column_names:
615
- raise ValueError(
616
- f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
617
- )
618
-
619
- if args.caption_column is None:
620
- caption_column = column_names[1]
621
- logger.info(f"caption column defaulting to {caption_column}")
622
- else:
623
- caption_column = args.caption_column
624
- if caption_column not in column_names:
625
- raise ValueError(
626
- f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
627
- )
628
-
629
- if args.conditioning_image_column is None:
630
- conditioning_image_column = column_names[2]
631
- logger.info(f"conditioning image column defaulting to {conditioning_image_column}")
632
- else:
633
- conditioning_image_column = args.conditioning_image_column
634
- if conditioning_image_column not in column_names:
635
- raise ValueError(
636
- f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
637
- )
638
-
639
- def tokenize_captions(examples, is_train=True):
640
- captions = []
641
- for caption in examples[caption_column]:
642
- if random.random() < args.proportion_empty_prompts:
643
- captions.append("")
644
- elif isinstance(caption, str):
645
- captions.append(caption)
646
- elif isinstance(caption, (list, np.ndarray)):
647
- # take a random caption if there are multiple
648
- captions.append(random.choice(caption) if is_train else caption[0])
649
- else:
650
- raise ValueError(
651
- f"Caption column `{caption_column}` should contain either strings or lists of strings."
652
- )
653
- inputs = tokenizer(
654
- captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
655
- )
656
- return inputs.input_ids
657
-
658
- image_transforms = transforms.Compose(
659
- [
660
- transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
661
- transforms.CenterCrop(args.resolution),
662
- transforms.ToTensor(),
663
- transforms.Normalize([0.5], [0.5]),
664
- ]
665
- )
666
-
667
- conditioning_image_transforms = transforms.Compose(
668
- [
669
- transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
670
- transforms.CenterCrop(args.resolution),
671
- transforms.ToTensor(),
672
- ]
673
- )
674
-
675
- def preprocess_train(examples):
676
- images = [image.convert("RGB") for image in examples[image_column]]
677
- images = [image_transforms(image) for image in images]
678
-
679
- conditioning_images = [image.convert("RGB") for image in examples[conditioning_image_column]]
680
- conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images]
681
-
682
- examples["pixel_values"] = images
683
- examples["conditioning_pixel_values"] = conditioning_images
684
- examples["input_ids"] = tokenize_captions(examples)
685
-
686
- return examples
687
-
688
- with accelerator.main_process_first():
689
- if args.max_train_samples is not None:
690
- dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
691
- # Set the training transforms
692
- train_dataset = dataset["train"].with_transform(preprocess_train)
693
-
694
- return train_dataset
695
-
696
-
697
- def collate_fn(examples):
698
- pixel_values = torch.stack([example["pixel_values"] for example in examples])
699
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
700
-
701
- conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples])
702
- conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float()
703
-
704
- input_ids = torch.stack([example["input_ids"] for example in examples])
705
-
706
- return {
707
- "pixel_values": pixel_values,
708
- "conditioning_pixel_values": conditioning_pixel_values,
709
- "input_ids": input_ids,
710
- }
711
-
712
-
713
- def main(args):
714
- logging_dir = Path(args.output_dir, args.logging_dir)
715
-
716
- accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
717
-
718
- accelerator = Accelerator(
719
- gradient_accumulation_steps=args.gradient_accumulation_steps,
720
- mixed_precision=args.mixed_precision,
721
- log_with=args.report_to,
722
- project_config=accelerator_project_config,
723
- )
724
-
725
- # Make one log on every process with the configuration for debugging.
726
- logging.basicConfig(
727
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
728
- datefmt="%m/%d/%Y %H:%M:%S",
729
- level=logging.INFO,
730
- )
731
- logger.info(accelerator.state, main_process_only=False)
732
- if accelerator.is_local_main_process:
733
- transformers.utils.logging.set_verbosity_warning()
734
- diffusers.utils.logging.set_verbosity_info()
735
- else:
736
- transformers.utils.logging.set_verbosity_error()
737
- diffusers.utils.logging.set_verbosity_error()
738
-
739
- # If passed along, set the training seed now.
740
- if args.seed is not None:
741
- set_seed(args.seed)
742
-
743
- # Handle the repository creation
744
- if accelerator.is_main_process:
745
- if args.output_dir is not None:
746
- os.makedirs(args.output_dir, exist_ok=True)
747
-
748
- if args.push_to_hub:
749
- repo_id = create_repo(
750
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
751
- ).repo_id
752
-
753
- # Load the tokenizer
754
- if args.tokenizer_name:
755
- tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
756
- elif args.pretrained_model_name_or_path:
757
- tokenizer = AutoTokenizer.from_pretrained(
758
- args.pretrained_model_name_or_path,
759
- subfolder="tokenizer",
760
- revision=args.revision,
761
- use_fast=False,
762
- )
763
-
764
- # import correct text encoder class
765
- text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
766
-
767
- # Load scheduler and models
768
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
769
- text_encoder = text_encoder_cls.from_pretrained(
770
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
771
- )
772
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
773
- unet = UNet2DConditionModel.from_pretrained(
774
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
775
- )
776
-
777
- if args.controlnet_model_name_or_path:
778
- logger.info("Loading existing controlnet weights")
779
- controlnet = ControlNetModel.from_pretrained(args.controlnet_model_name_or_path)
780
- else:
781
- logger.info("Initializing controlnet weights from unet")
782
- controlnet = ControlNetModel.from_unet(unet)
783
-
784
- # `accelerate` 0.16.0 will have better support for customized saving
785
- if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
786
- # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
787
- def save_model_hook(models, weights, output_dir):
788
- i = len(weights) - 1
789
-
790
- while len(weights) > 0:
791
- weights.pop()
792
- model = models[i]
793
-
794
- sub_dir = "controlnet"
795
- model.save_pretrained(os.path.join(output_dir, sub_dir))
796
-
797
- i -= 1
798
-
799
- def load_model_hook(models, input_dir):
800
- while len(models) > 0:
801
- # pop models so that they are not loaded again
802
- model = models.pop()
803
-
804
- # load diffusers style into model
805
- load_model = ControlNetModel.from_pretrained(input_dir, subfolder="controlnet")
806
- model.register_to_config(**load_model.config)
807
-
808
- model.load_state_dict(load_model.state_dict())
809
- del load_model
810
-
811
- accelerator.register_save_state_pre_hook(save_model_hook)
812
- accelerator.register_load_state_pre_hook(load_model_hook)
813
-
814
- vae.requires_grad_(False)
815
- unet.requires_grad_(False)
816
- text_encoder.requires_grad_(False)
817
- controlnet.train()
818
-
819
- if args.enable_xformers_memory_efficient_attention:
820
- if is_xformers_available():
821
- import xformers
822
-
823
- xformers_version = version.parse(xformers.__version__)
824
- if xformers_version == version.parse("0.0.16"):
825
- logger.warn(
826
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
827
- )
828
- unet.enable_xformers_memory_efficient_attention()
829
- controlnet.enable_xformers_memory_efficient_attention()
830
- else:
831
- raise ValueError("xformers is not available. Make sure it is installed correctly")
832
-
833
- if args.gradient_checkpointing:
834
- controlnet.enable_gradient_checkpointing()
835
-
836
- # Check that all trainable models are in full precision
837
- low_precision_error_string = (
838
- " Please make sure to always have all model weights in full float32 precision when starting training - even if"
839
- " doing mixed precision training, copy of the weights should still be float32."
840
- )
841
-
842
- if accelerator.unwrap_model(controlnet).dtype != torch.float32:
843
- raise ValueError(
844
- f"Controlnet loaded as datatype {accelerator.unwrap_model(controlnet).dtype}. {low_precision_error_string}"
845
- )
846
-
847
- # Enable TF32 for faster training on Ampere GPUs,
848
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
849
- if args.allow_tf32:
850
- torch.backends.cuda.matmul.allow_tf32 = True
851
-
852
- if args.scale_lr:
853
- args.learning_rate = (
854
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
855
- )
856
-
857
- # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs
858
- if args.use_8bit_adam:
859
- try:
860
- import bitsandbytes as bnb
861
- except ImportError:
862
- raise ImportError(
863
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
864
- )
865
-
866
- optimizer_class = bnb.optim.AdamW8bit
867
- else:
868
- optimizer_class = torch.optim.AdamW
869
-
870
- # Optimizer creation
871
- params_to_optimize = controlnet.parameters()
872
- optimizer = optimizer_class(
873
- params_to_optimize,
874
- lr=args.learning_rate,
875
- betas=(args.adam_beta1, args.adam_beta2),
876
- weight_decay=args.adam_weight_decay,
877
- eps=args.adam_epsilon,
878
- )
879
-
880
- train_dataset = make_train_dataset(args, tokenizer, accelerator)
881
-
882
- train_dataloader = torch.utils.data.DataLoader(
883
- train_dataset,
884
- shuffle=True,
885
- collate_fn=collate_fn,
886
- batch_size=args.train_batch_size,
887
- num_workers=args.dataloader_num_workers,
888
- )
889
-
890
- # Scheduler and math around the number of training steps.
891
- overrode_max_train_steps = False
892
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
893
- if args.max_train_steps is None:
894
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
895
- overrode_max_train_steps = True
896
-
897
- lr_scheduler = get_scheduler(
898
- args.lr_scheduler,
899
- optimizer=optimizer,
900
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
901
- num_training_steps=args.max_train_steps * accelerator.num_processes,
902
- num_cycles=args.lr_num_cycles,
903
- power=args.lr_power,
904
- )
905
-
906
- # Prepare everything with our `accelerator`.
907
- controlnet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
908
- controlnet, optimizer, train_dataloader, lr_scheduler
909
- )
910
-
911
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
912
- # as these models are only used for inference; keeping their weights in full precision is not required.
913
- weight_dtype = torch.float32
914
- if accelerator.mixed_precision == "fp16":
915
- weight_dtype = torch.float16
916
- elif accelerator.mixed_precision == "bf16":
917
- weight_dtype = torch.bfloat16
918
-
919
- # Move vae, unet and text_encoder to device and cast to weight_dtype
920
- vae.to(accelerator.device, dtype=weight_dtype)
921
- unet.to(accelerator.device, dtype=weight_dtype)
922
- text_encoder.to(accelerator.device, dtype=weight_dtype)
923
-
924
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
925
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
926
- if overrode_max_train_steps:
927
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
928
- # Afterwards we recalculate our number of training epochs
929
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
930
-
931
- # We need to initialize the trackers we use, and also store our configuration.
932
- # The trackers initialize automatically on the main process.
933
- if accelerator.is_main_process:
934
- tracker_config = dict(vars(args))
935
-
936
- # tensorboard cannot handle list types for config
937
- tracker_config.pop("validation_prompt")
938
- tracker_config.pop("validation_image")
939
-
940
- accelerator.init_trackers(args.tracker_project_name, config=tracker_config)
941
-
942
- # Train!
943
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
944
-
945
- logger.info("***** Running training *****")
946
- logger.info(f" Num examples = {len(train_dataset)}")
947
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
948
- logger.info(f" Num Epochs = {args.num_train_epochs}")
949
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
950
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
951
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
952
- logger.info(f" Total optimization steps = {args.max_train_steps}")
953
- global_step = 0
954
- first_epoch = 0
955
-
956
- # Potentially load in the weights and states from a previous save
957
- if args.resume_from_checkpoint:
958
- if args.resume_from_checkpoint != "latest":
959
- path = os.path.basename(args.resume_from_checkpoint)
960
- else:
961
- # Get the most recent checkpoint
962
- dirs = os.listdir(args.output_dir)
963
- dirs = [d for d in dirs if d.startswith("checkpoint")]
964
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
965
- path = dirs[-1] if len(dirs) > 0 else None
966
-
967
- if path is None:
968
- accelerator.print(
969
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
970
- )
971
- args.resume_from_checkpoint = None
972
- initial_global_step = 0
973
- else:
974
- accelerator.print(f"Resuming from checkpoint {path}")
975
- accelerator.load_state(os.path.join(args.output_dir, path))
976
- global_step = int(path.split("-")[1])
977
-
978
- initial_global_step = global_step
979
- first_epoch = global_step // num_update_steps_per_epoch
980
- else:
981
- initial_global_step = 0
982
-
983
- progress_bar = tqdm(
984
- range(0, args.max_train_steps),
985
- initial=initial_global_step,
986
- desc="Steps",
987
- # Only show the progress bar once on each machine.
988
- disable=not accelerator.is_local_main_process,
989
- )
990
-
991
- image_logs = None
992
- for epoch in range(first_epoch, args.num_train_epochs):
993
- for step, batch in enumerate(train_dataloader):
994
- with accelerator.accumulate(controlnet):
995
- # Convert images to latent space
996
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
997
- latents = latents * vae.config.scaling_factor
998
-
999
- # Sample noise that we'll add to the latents
1000
- noise = torch.randn_like(latents)
1001
- bsz = latents.shape[0]
1002
- # Sample a random timestep for each image
1003
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
1004
- timesteps = timesteps.long()
1005
-
1006
- # Add noise to the latents according to the noise magnitude at each timestep
1007
- # (this is the forward diffusion process)
1008
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
1009
-
1010
- # Get the text embedding for conditioning
1011
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
1012
-
1013
- controlnet_image = batch["conditioning_pixel_values"].to(dtype=weight_dtype)
1014
-
1015
- down_block_res_samples, mid_block_res_sample = controlnet(
1016
- noisy_latents,
1017
- timesteps,
1018
- encoder_hidden_states=encoder_hidden_states,
1019
- controlnet_cond=controlnet_image,
1020
- return_dict=False,
1021
- )
1022
-
1023
- # Predict the noise residual
1024
- model_pred = unet(
1025
- noisy_latents,
1026
- timesteps,
1027
- encoder_hidden_states=encoder_hidden_states,
1028
- down_block_additional_residuals=[
1029
- sample.to(dtype=weight_dtype) for sample in down_block_res_samples
1030
- ],
1031
- mid_block_additional_residual=mid_block_res_sample.to(dtype=weight_dtype),
1032
- ).sample
1033
-
1034
- # Get the target for loss depending on the prediction type
1035
- if noise_scheduler.config.prediction_type == "epsilon":
1036
- target = noise
1037
- elif noise_scheduler.config.prediction_type == "v_prediction":
1038
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
1039
- else:
1040
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
1041
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1042
-
1043
- accelerator.backward(loss)
1044
- if accelerator.sync_gradients:
1045
- params_to_clip = controlnet.parameters()
1046
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1047
- optimizer.step()
1048
- lr_scheduler.step()
1049
- optimizer.zero_grad(set_to_none=args.set_grads_to_none)
1050
-
1051
- # Checks if the accelerator has performed an optimization step behind the scenes
1052
- if accelerator.sync_gradients:
1053
- progress_bar.update(1)
1054
- global_step += 1
1055
-
1056
- if accelerator.is_main_process:
1057
- if global_step % args.checkpointing_steps == 0:
1058
- # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1059
- if args.checkpoints_total_limit is not None:
1060
- checkpoints = os.listdir(args.output_dir)
1061
- checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1062
- checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1063
-
1064
- # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1065
- if len(checkpoints) >= args.checkpoints_total_limit:
1066
- num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1067
- removing_checkpoints = checkpoints[0:num_to_remove]
1068
-
1069
- logger.info(
1070
- f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1071
- )
1072
- logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
1073
-
1074
- for removing_checkpoint in removing_checkpoints:
1075
- removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1076
- shutil.rmtree(removing_checkpoint)
1077
-
1078
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1079
- accelerator.save_state(save_path)
1080
- logger.info(f"Saved state to {save_path}")
1081
-
1082
- if args.validation_prompt is not None and global_step % args.validation_steps == 0:
1083
- image_logs = log_validation(
1084
- vae,
1085
- text_encoder,
1086
- tokenizer,
1087
- unet,
1088
- controlnet,
1089
- args,
1090
- accelerator,
1091
- weight_dtype,
1092
- global_step,
1093
- )
1094
-
1095
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1096
- progress_bar.set_postfix(**logs)
1097
- accelerator.log(logs, step=global_step)
1098
-
1099
- if global_step >= args.max_train_steps:
1100
- break
1101
-
1102
- # Create the pipeline using the trained modules and save it.
1103
- accelerator.wait_for_everyone()
1104
- if accelerator.is_main_process:
1105
- controlnet = accelerator.unwrap_model(controlnet)
1106
- controlnet.save_pretrained(args.output_dir)
1107
-
1108
- if args.push_to_hub:
1109
- save_model_card(
1110
- repo_id,
1111
- image_logs=image_logs,
1112
- base_model=args.pretrained_model_name_or_path,
1113
- repo_folder=args.output_dir,
1114
- )
1115
- upload_folder(
1116
- repo_id=repo_id,
1117
- folder_path=args.output_dir,
1118
- commit_message="End of training",
1119
- ignore_patterns=["step_*", "epoch_*"],
1120
- )
1121
-
1122
- accelerator.end_training()
1123
-
1124
-
1125
- if __name__ == "__main__":
1126
- args = parse_args()
1127
- main(args)
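As a quick follow-up to the training script above: once it finishes, the ControlNet weights saved to `--output_dir` (default `controlnet-model`) can be loaded for inference. A minimal sketch, assuming the base model id, the conditioning image path, and a CUDA device are placeholders rather than values fixed by this script:

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# Load the weights written by controlnet.save_pretrained(args.output_dir)
controlnet = ControlNetModel.from_pretrained("controlnet-model", torch_dtype=torch.float16)

# Pair the ControlNet with the same base model it was trained against (placeholder id)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

conditioning = load_image("conditioning_image.png")  # hypothetical conditioning image
image = pipe("a validation prompt", image=conditioning, num_inference_steps=20).images[0]
image.save("controlnet_sample.png")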
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_controlnet_to_diffusers.py DELETED
@@ -1,109 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """ Conversion script for stable diffusion checkpoints which _only_ contain a contrlnet. """
16
-
17
- import argparse
18
-
19
- from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
20
-
21
-
22
- if __name__ == "__main__":
23
- parser = argparse.ArgumentParser()
24
-
25
- parser.add_argument(
26
- "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
27
- )
28
- parser.add_argument(
29
- "--original_config_file",
30
- type=str,
31
- required=True,
32
- help="The YAML config file corresponding to the original architecture.",
33
- )
34
- parser.add_argument(
35
- "--num_in_channels",
36
- default=None,
37
- type=int,
38
- help="The number of input channels. If `None` number of input channels will be automatically inferred.",
39
- )
40
- parser.add_argument(
41
- "--image_size",
42
- default=512,
43
- type=int,
44
- help=(
45
- "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
46
- " Base. Use 768 for Stable Diffusion v2."
47
- ),
48
- )
49
- parser.add_argument(
50
- "--extract_ema",
51
- action="store_true",
52
- help=(
53
- "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
54
- " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
55
- " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
56
- ),
57
- )
58
- parser.add_argument(
59
- "--upcast_attention",
60
- action="store_true",
61
- help=(
62
- "Whether the attention computation should always be upcasted. This is necessary when running stable"
63
- " diffusion 2.1."
64
- ),
65
- )
66
- parser.add_argument(
67
- "--from_safetensors",
68
- action="store_true",
69
- help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
70
- )
71
- parser.add_argument(
72
- "--to_safetensors",
73
- action="store_true",
74
- help="Whether to store pipeline in safetensors format or not.",
75
- )
76
- parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
77
- parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
78
-
79
- # small workaround to get argparser to parse a boolean input as either true _or_ false
80
- def parse_bool(string):
81
- if string == "True":
82
- return True
83
- elif string == "False":
84
- return False
85
- else:
86
- raise ValueError(f"could not parse string as bool {string}")
87
-
88
- parser.add_argument(
89
- "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
90
- )
91
-
92
- parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
93
-
94
- args = parser.parse_args()
95
-
96
- controlnet = download_controlnet_from_original_ckpt(
97
- checkpoint_path=args.checkpoint_path,
98
- original_config_file=args.original_config_file,
99
- image_size=args.image_size,
100
- extract_ema=args.extract_ema,
101
- num_in_channels=args.num_in_channels,
102
- upcast_attention=args.upcast_attention,
103
- from_safetensors=args.from_safetensors,
104
- device=args.device,
105
- use_linear_projection=args.use_linear_projection,
106
- cross_attention_dim=args.cross_attention_dim,
107
- )
108
-
109
- controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
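The same conversion can be driven programmatically by mirroring the call the script makes above. A hedged sketch, with the checkpoint path, YAML config name, and output directory as placeholders:

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_controlnet_from_original_ckpt,
)

# Keyword arguments mirror the argparse defaults of the script above.
controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path="control_sd15_canny.pth",  # placeholder checkpoint file
    original_config_file="cldm_v15.yaml",      # placeholder original-architecture YAML
    image_size=512,
    extract_ema=False,
    num_in_channels=None,
    upcast_attention=False,
    from_safetensors=False,
    device=None,
    use_linear_projection=None,
    cross_attention_dim=None,
)
controlnet.save_pretrained("converted-controlnet", safe_serialization=True)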
spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './fast_rcnn_r50_fpn_2x_coco.py'
2
- model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
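As a hedged illustration of what this two-line override produces once MMDetection merges it with its `_base_` config (the relative path below is an assumption about where the file lives):

from mmcv import Config

# Config.fromfile resolves the _base_ chain and applies the overrides above.
cfg = Config.fromfile("configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py")
print(cfg.model.backbone.depth)   # 101, replacing the ResNet-50 of the base config
print(cfg.model.pretrained)       # 'torchvision://resnet101'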
spaces/Andy1621/uniformer_image_detection/mmdet/core/evaluation/bbox_overlaps.py DELETED
@@ -1,48 +0,0 @@
1
- import numpy as np
2
-
3
-
4
- def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
5
- """Calculate the ious between each bbox of bboxes1 and bboxes2.
6
-
7
- Args:
8
- bboxes1(ndarray): shape (n, 4)
9
- bboxes2(ndarray): shape (k, 4)
10
- mode(str): iou (intersection over union) or iof (intersection
11
- over foreground)
12
-
13
- Returns:
14
- ious(ndarray): shape (n, k)
15
- """
16
-
17
- assert mode in ['iou', 'iof']
18
-
19
- bboxes1 = bboxes1.astype(np.float32)
20
- bboxes2 = bboxes2.astype(np.float32)
21
- rows = bboxes1.shape[0]
22
- cols = bboxes2.shape[0]
23
- ious = np.zeros((rows, cols), dtype=np.float32)
24
- if rows * cols == 0:
25
- return ious
26
- exchange = False
27
- if bboxes1.shape[0] > bboxes2.shape[0]:
28
- bboxes1, bboxes2 = bboxes2, bboxes1
29
- ious = np.zeros((cols, rows), dtype=np.float32)
30
- exchange = True
31
- area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
32
- area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
33
- for i in range(bboxes1.shape[0]):
34
- x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
35
- y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
36
- x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
37
- y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
38
- overlap = np.maximum(x_end - x_start, 0) * np.maximum(
39
- y_end - y_start, 0)
40
- if mode == 'iou':
41
- union = area1[i] + area2 - overlap
42
- else:
43
- union = area1[i] if not exchange else area2
44
- union = np.maximum(union, eps)
45
- ious[i, :] = overlap / union
46
- if exchange:
47
- ious = ious.T
48
- return ious
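A quick sanity check of the function above (not part of the original module, and assuming `bbox_overlaps` is in scope): two 10x10 boxes that overlap in a 5x5 region give an IoU of 25 / 175.

import numpy as np

boxes_a = np.array([[0, 0, 10, 10]], dtype=np.float32)
boxes_b = np.array([[5, 5, 15, 15]], dtype=np.float32)

print(bbox_overlaps(boxes_a, boxes_b, mode='iou'))
# [[0.14285715]] -- overlap 5 * 5 = 25, union 100 + 100 - 25 = 175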
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- from .bbox_head import BBoxHead
2
- from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
3
- Shared4Conv1FCBBoxHead)
4
- from .dii_head import DIIHead
5
- from .double_bbox_head import DoubleConvFCBBoxHead
6
- from .sabl_head import SABLHead
7
- from .scnet_bbox_head import SCNetBBoxHead
8
-
9
- __all__ = [
10
- 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
11
- 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
12
- 'SCNetBBoxHead'
13
- ]
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r18b-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py'
2
- model = dict(
3
- pretrained='torchvision://resnet18',
4
- backbone=dict(type='ResNet', depth=18),
5
- decode_head=dict(
6
- in_channels=512,
7
- channels=128,
8
- ),
9
- auxiliary_head=dict(in_channels=256, channels=64))
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/__init__.py DELETED
@@ -1,47 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- from .base_module import BaseModule, ModuleList, Sequential
3
- from .base_runner import BaseRunner
4
- from .builder import RUNNERS, build_runner
5
- from .checkpoint import (CheckpointLoader, _load_checkpoint,
6
- _load_checkpoint_with_prefix, load_checkpoint,
7
- load_state_dict, save_checkpoint, weights_to_cpu)
8
- from .default_constructor import DefaultRunnerConstructor
9
- from .dist_utils import (allreduce_grads, allreduce_params, get_dist_info,
10
- init_dist, master_only)
11
- from .epoch_based_runner import EpochBasedRunner, Runner
12
- from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model
13
- from .hooks import (HOOKS, CheckpointHook, ClosureHook, DistEvalHook,
14
- DistSamplerSeedHook, DvcliveLoggerHook, EMAHook, EvalHook,
15
- Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
16
- GradientCumulativeOptimizerHook, Hook, IterTimerHook,
17
- LoggerHook, LrUpdaterHook, MlflowLoggerHook,
18
- NeptuneLoggerHook, OptimizerHook, PaviLoggerHook,
19
- SyncBuffersHook, TensorboardLoggerHook, TextLoggerHook,
20
- WandbLoggerHook)
21
- from .iter_based_runner import IterBasedRunner, IterLoader
22
- from .log_buffer import LogBuffer
23
- from .optimizer import (OPTIMIZER_BUILDERS, OPTIMIZERS,
24
- DefaultOptimizerConstructor, build_optimizer,
25
- build_optimizer_constructor)
26
- from .priority import Priority, get_priority
27
- from .utils import get_host_info, get_time_str, obj_from_dict, set_random_seed
28
-
29
- __all__ = [
30
- 'BaseRunner', 'Runner', 'EpochBasedRunner', 'IterBasedRunner', 'LogBuffer',
31
- 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
32
- 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 'LoggerHook',
33
- 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
34
- 'NeptuneLoggerHook', 'WandbLoggerHook', 'MlflowLoggerHook',
35
- 'DvcliveLoggerHook', '_load_checkpoint', 'load_state_dict',
36
- 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 'Priority',
37
- 'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict',
38
- 'init_dist', 'get_dist_info', 'master_only', 'OPTIMIZER_BUILDERS',
39
- 'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer',
40
- 'build_optimizer_constructor', 'IterLoader', 'set_random_seed',
41
- 'auto_fp16', 'force_fp32', 'wrap_fp16_model', 'Fp16OptimizerHook',
42
- 'SyncBuffersHook', 'EMAHook', 'build_runner', 'RUNNERS', 'allreduce_grads',
43
- 'allreduce_params', 'LossScaler', 'CheckpointLoader', 'BaseModule',
44
- '_load_checkpoint_with_prefix', 'EvalHook', 'DistEvalHook', 'Sequential',
45
- 'ModuleList', 'GradientCumulativeOptimizerHook',
46
- 'GradientCumulativeFp16OptimizerHook', 'DefaultRunnerConstructor'
47
- ]
spaces/AnthonyTruchetPoC/persistent-docker/doc/conf.py DELETED
@@ -1,52 +0,0 @@
1
- # Configuration file for the Sphinx documentation builder.
2
- #
3
- # For the full list of built-in configuration values, see the documentation:
4
- # https://www.sphinx-doc.org/en/master/usage/configuration.html
5
-
6
- # -- Project information -----------------------------------------------------
7
- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
8
-
9
- project = "Template Python project"
10
- copyright = "2023, Polyconseil"
11
- author = "Anthony Truchet"
12
- release = "0.1"
13
-
14
- # -- General configuration ---------------------------------------------------
15
- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
16
-
17
- templates_path = ["_templates"]
18
- exclude_patterns = []
19
-
20
-
21
- extensions = [
22
- "sphinx.ext.autodoc",
23
- "sphinx.ext.autosummary",
24
- "sphinx.ext.napoleon",
25
- "sphinx.ext.todo",
26
- "myst_parser",
27
- "sphinx_rtd_theme",
28
- ]
29
-
30
- autodoc_typehints = "signature"
31
- autodoc_default_options = {
32
- "members": True,
33
- "member-order": "bysource",
34
- "special-members": "__call__",
35
- "undoc-members": True,
36
- "exclude-members": "__weakref__",
37
- "show-inheritance": True,
38
- "autodoc_typehints": "signature",
39
- }
40
-
41
- autosummary_generate = True
42
-
43
- # https://www.sphinx-doc.org/en/master/usage/extensions/todo.html
44
- todo_include_todos = True
45
-
46
- # -- Options for HTML output -------------------------------------------------
47
- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
48
-
49
- # html_theme = 'alabaster'
50
- html_theme = "sphinx_rtd_theme"
51
-
52
- html_static_path = ["_static"]
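A minimal sketch of building the docs against this configuration from Python; the source and output directories are assumptions based on this file living under doc/, not paths taken from the repository:

from sphinx.cmd.build import build_main

# Equivalent to running `sphinx-build -b html doc doc/_build/html`.
raise SystemExit(build_main(["-b", "html", "doc", "doc/_build/html"]))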
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/main.py DELETED
@@ -1,12 +0,0 @@
1
- from typing import List, Optional
2
-
3
-
4
- def main(args: Optional[List[str]] = None) -> int:
5
- """This is preserved for old console scripts that may still be referencing
6
- it.
7
-
8
- For additional details, see https://github.com/pypa/pip/issues/7498.
9
- """
10
- from pip._internal.utils.entrypoints import _wrapper
11
-
12
- return _wrapper(args)
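Since the module only forwards to `_wrapper`, an old-style console script would effectively do the following. This is illustrative only; pip's internal API is not a supported public interface:

import sys

from pip._internal.main import main

# Behaves like running `pip --version` from the command line.
sys.exit(main(["--version"]))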
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/pkg_resources.py DELETED
@@ -1,270 +0,0 @@
1
- import email.message
2
- import email.parser
3
- import logging
4
- import os
5
- import zipfile
6
- from typing import Collection, Iterable, Iterator, List, Mapping, NamedTuple, Optional
7
-
8
- from pip._vendor import pkg_resources
9
- from pip._vendor.packaging.requirements import Requirement
10
- from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
11
- from pip._vendor.packaging.version import parse as parse_version
12
-
13
- from pip._internal.exceptions import InvalidWheel, NoneMetadataError, UnsupportedWheel
14
- from pip._internal.utils.egg_link import egg_link_path_from_location
15
- from pip._internal.utils.misc import display_path, normalize_path
16
- from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file
17
-
18
- from .base import (
19
- BaseDistribution,
20
- BaseEntryPoint,
21
- BaseEnvironment,
22
- DistributionVersion,
23
- InfoPath,
24
- Wheel,
25
- )
26
-
27
- logger = logging.getLogger(__name__)
28
-
29
-
30
- class EntryPoint(NamedTuple):
31
- name: str
32
- value: str
33
- group: str
34
-
35
-
36
- class InMemoryMetadata:
37
- """IMetadataProvider that reads metadata files from a dictionary.
38
-
39
- This also maps metadata decoding exceptions to our internal exception type.
40
- """
41
-
42
- def __init__(self, metadata: Mapping[str, bytes], wheel_name: str) -> None:
43
- self._metadata = metadata
44
- self._wheel_name = wheel_name
45
-
46
- def has_metadata(self, name: str) -> bool:
47
- return name in self._metadata
48
-
49
- def get_metadata(self, name: str) -> str:
50
- try:
51
- return self._metadata[name].decode()
52
- except UnicodeDecodeError as e:
53
- # Augment the default error with the origin of the file.
54
- raise UnsupportedWheel(
55
- f"Error decoding metadata for {self._wheel_name}: {e} in {name} file"
56
- )
57
-
58
- def get_metadata_lines(self, name: str) -> Iterable[str]:
59
- return pkg_resources.yield_lines(self.get_metadata(name))
60
-
61
- def metadata_isdir(self, name: str) -> bool:
62
- return False
63
-
64
- def metadata_listdir(self, name: str) -> List[str]:
65
- return []
66
-
67
- def run_script(self, script_name: str, namespace: str) -> None:
68
- pass
69
-
70
-
71
- class Distribution(BaseDistribution):
72
- def __init__(self, dist: pkg_resources.Distribution) -> None:
73
- self._dist = dist
74
-
75
- @classmethod
76
- def from_directory(cls, directory: str) -> BaseDistribution:
77
- dist_dir = directory.rstrip(os.sep)
78
-
79
- # Build a PathMetadata object, from path to metadata. :wink:
80
- base_dir, dist_dir_name = os.path.split(dist_dir)
81
- metadata = pkg_resources.PathMetadata(base_dir, dist_dir)
82
-
83
- # Determine the correct Distribution object type.
84
- if dist_dir.endswith(".egg-info"):
85
- dist_cls = pkg_resources.Distribution
86
- dist_name = os.path.splitext(dist_dir_name)[0]
87
- else:
88
- assert dist_dir.endswith(".dist-info")
89
- dist_cls = pkg_resources.DistInfoDistribution
90
- dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0]
91
-
92
- dist = dist_cls(base_dir, project_name=dist_name, metadata=metadata)
93
- return cls(dist)
94
-
95
- @classmethod
96
- def from_metadata_file_contents(
97
- cls,
98
- metadata_contents: bytes,
99
- filename: str,
100
- project_name: str,
101
- ) -> BaseDistribution:
102
- metadata_dict = {
103
- "METADATA": metadata_contents,
104
- }
105
- dist = pkg_resources.DistInfoDistribution(
106
- location=filename,
107
- metadata=InMemoryMetadata(metadata_dict, filename),
108
- project_name=project_name,
109
- )
110
- return cls(dist)
111
-
112
- @classmethod
113
- def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
114
- try:
115
- with wheel.as_zipfile() as zf:
116
- info_dir, _ = parse_wheel(zf, name)
117
- metadata_dict = {
118
- path.split("/", 1)[-1]: read_wheel_metadata_file(zf, path)
119
- for path in zf.namelist()
120
- if path.startswith(f"{info_dir}/")
121
- }
122
- except zipfile.BadZipFile as e:
123
- raise InvalidWheel(wheel.location, name) from e
124
- except UnsupportedWheel as e:
125
- raise UnsupportedWheel(f"{name} has an invalid wheel, {e}")
126
- dist = pkg_resources.DistInfoDistribution(
127
- location=wheel.location,
128
- metadata=InMemoryMetadata(metadata_dict, wheel.location),
129
- project_name=name,
130
- )
131
- return cls(dist)
132
-
133
- @property
134
- def location(self) -> Optional[str]:
135
- return self._dist.location
136
-
137
- @property
138
- def installed_location(self) -> Optional[str]:
139
- egg_link = egg_link_path_from_location(self.raw_name)
140
- if egg_link:
141
- location = egg_link
142
- elif self.location:
143
- location = self.location
144
- else:
145
- return None
146
- return normalize_path(location)
147
-
148
- @property
149
- def info_location(self) -> Optional[str]:
150
- return self._dist.egg_info
151
-
152
- @property
153
- def installed_by_distutils(self) -> bool:
154
- # A distutils-installed distribution is provided by FileMetadata. This
155
- # provider has a "path" attribute not present anywhere else. Not the
156
- # best introspection logic, but pip has been doing this for a long time.
157
- try:
158
- return bool(self._dist._provider.path)
159
- except AttributeError:
160
- return False
161
-
162
- @property
163
- def canonical_name(self) -> NormalizedName:
164
- return canonicalize_name(self._dist.project_name)
165
-
166
- @property
167
- def version(self) -> DistributionVersion:
168
- return parse_version(self._dist.version)
169
-
170
- def is_file(self, path: InfoPath) -> bool:
171
- return self._dist.has_metadata(str(path))
172
-
173
- def iter_distutils_script_names(self) -> Iterator[str]:
174
- yield from self._dist.metadata_listdir("scripts")
175
-
176
- def read_text(self, path: InfoPath) -> str:
177
- name = str(path)
178
- if not self._dist.has_metadata(name):
179
- raise FileNotFoundError(name)
180
- content = self._dist.get_metadata(name)
181
- if content is None:
182
- raise NoneMetadataError(self, name)
183
- return content
184
-
185
- def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
186
- for group, entries in self._dist.get_entry_map().items():
187
- for name, entry_point in entries.items():
188
- name, _, value = str(entry_point).partition("=")
189
- yield EntryPoint(name=name.strip(), value=value.strip(), group=group)
190
-
191
- def _metadata_impl(self) -> email.message.Message:
192
- """
193
- :raises NoneMetadataError: if the distribution reports `has_metadata()`
194
- True but `get_metadata()` returns None.
195
- """
196
- if isinstance(self._dist, pkg_resources.DistInfoDistribution):
197
- metadata_name = "METADATA"
198
- else:
199
- metadata_name = "PKG-INFO"
200
- try:
201
- metadata = self.read_text(metadata_name)
202
- except FileNotFoundError:
203
- if self.location:
204
- displaying_path = display_path(self.location)
205
- else:
206
- displaying_path = repr(self.location)
207
- logger.warning("No metadata found in %s", displaying_path)
208
- metadata = ""
209
- feed_parser = email.parser.FeedParser()
210
- feed_parser.feed(metadata)
211
- return feed_parser.close()
212
-
213
- def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
214
- if extras: # pkg_resources raises on invalid extras, so we sanitize.
215
- extras = frozenset(extras).intersection(self._dist.extras)
216
- return self._dist.requires(extras)
217
-
218
- def iter_provided_extras(self) -> Iterable[str]:
219
- return self._dist.extras
220
-
221
-
222
- class Environment(BaseEnvironment):
223
- def __init__(self, ws: pkg_resources.WorkingSet) -> None:
224
- self._ws = ws
225
-
226
- @classmethod
227
- def default(cls) -> BaseEnvironment:
228
- return cls(pkg_resources.working_set)
229
-
230
- @classmethod
231
- def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
232
- return cls(pkg_resources.WorkingSet(paths))
233
-
234
- def _iter_distributions(self) -> Iterator[BaseDistribution]:
235
- for dist in self._ws:
236
- yield Distribution(dist)
237
-
238
- def _search_distribution(self, name: str) -> Optional[BaseDistribution]:
239
- """Find a distribution matching the ``name`` in the environment.
240
-
241
- This searches from *all* distributions available in the environment, to
242
- match the behavior of ``pkg_resources.get_distribution()``.
243
- """
244
- canonical_name = canonicalize_name(name)
245
- for dist in self.iter_all_distributions():
246
- if dist.canonical_name == canonical_name:
247
- return dist
248
- return None
249
-
250
- def get_distribution(self, name: str) -> Optional[BaseDistribution]:
251
- # Search the distribution by looking through the working set.
252
- dist = self._search_distribution(name)
253
- if dist:
254
- return dist
255
-
256
- # If distribution could not be found, call working_set.require to
257
- # update the working set, and try to find the distribution again.
258
- # This might happen for e.g. when you install a package twice, once
259
- # using setup.py develop and again using setup.py install. Now when
260
- # running pip uninstall twice, the package gets removed from the
261
- # working set in the first uninstall, so we have to populate the
262
- # working set again so that pip knows about it and the packages gets
263
- # picked up and is successfully uninstalled the second time too.
264
- try:
265
- # We didn't pass in any version specifiers, so this can never
266
- # raise pkg_resources.VersionConflict.
267
- self._ws.require(name)
268
- except pkg_resources.DistributionNotFound:
269
- return None
270
- return self._search_distribution(name)
 
spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolov3.py DELETED
@@ -1,33 +0,0 @@
1
- #!/usr/bin/env python3
2
- # -*- coding:utf-8 -*-
3
- # Copyright (c) Megvii, Inc. and its affiliates.
4
-
5
- import os
6
-
7
- import torch.nn as nn
8
-
9
- from yolox.exp import Exp as MyExp
10
-
11
-
12
- class Exp(MyExp):
13
- def __init__(self):
14
- super(Exp, self).__init__()
15
- self.depth = 1.0
16
- self.width = 1.0
17
- self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
18
-
19
- def get_model(self, sublinear=False):
20
- def init_yolo(M):
21
- for m in M.modules():
22
- if isinstance(m, nn.BatchNorm2d):
23
- m.eps = 1e-3
24
- m.momentum = 0.03
25
- if "model" not in self.__dict__:
26
- from yolox.models import YOLOX, YOLOFPN, YOLOXHead
27
- backbone = YOLOFPN()
28
- head = YOLOXHead(self.num_classes, self.width, in_channels=[128, 256, 512], act="lrelu")
29
- self.model = YOLOX(backbone, head)
30
- self.model.apply(init_yolo)
31
- self.model.head.initialize_biases(1e-2)
32
-
33
- return self.model
 
spaces/Audio-AGI/WavJourney/VoiceParser/pre_kmeans_hubert.py DELETED
@@ -1,106 +0,0 @@
1
- """
2
- Modified HuBERT model without kmeans.
3
- Original author: https://github.com/lucidrains/
4
- Modified by: https://www.github.com/gitmylo/
5
- License: MIT
6
- """
7
-
8
- # Modified code from https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/hubert_kmeans.py
9
-
10
- from pathlib import Path
11
-
12
- import torch
13
- from torch import nn
14
- from einops import pack, unpack
15
-
16
- import fairseq
17
-
18
- from torchaudio.functional import resample
19
-
20
- from audiolm_pytorch.utils import curtail_to_multiple
21
-
22
- import logging
23
- logging.root.setLevel(logging.ERROR)
24
-
25
-
26
- def exists(val):
27
- return val is not None
28
-
29
-
30
- def default(val, d):
31
- return val if exists(val) else d
32
-
33
-
34
- class CustomHubert(nn.Module):
35
- """
36
- checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
37
- or you can train your own
38
- """
39
-
40
- def __init__(
41
- self,
42
- checkpoint_path,
43
- target_sample_hz=16000,
44
- seq_len_multiple_of=None,
45
- output_layer=9,
46
- device=None
47
- ):
48
- super().__init__()
49
- self.target_sample_hz = target_sample_hz
50
- self.seq_len_multiple_of = seq_len_multiple_of
51
- self.output_layer = output_layer
52
-
53
- if device is not None:
54
- self.to(device)
55
-
56
- model_path = Path(checkpoint_path)
57
-
58
- assert model_path.exists(), f'path {checkpoint_path} does not exist'
59
-
60
- checkpoint = torch.load(checkpoint_path, map_location=device)
61
- load_model_input = {checkpoint_path: checkpoint}
62
- model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
63
-
64
- if device is not None:
65
- model[0].to(device)
66
-
67
- self.model = model[0]
68
- self.model.eval()
69
-
70
- @property
71
- def groups(self):
72
- return 1
73
-
74
- @torch.no_grad()
75
- def forward(
76
- self,
77
- wav_input,
78
- flatten=True,
79
- input_sample_hz=None
80
- ):
81
- device = wav_input.device
82
-
83
- if exists(input_sample_hz):
84
- wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
85
-
86
- if exists(self.seq_len_multiple_of):
87
- wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
88
-
89
- embed = self.model(
90
- wav_input,
91
- features_only=True,
92
- mask=False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
93
- output_layer=self.output_layer
94
- )
95
-
96
- embed, packed_shape = pack([embed['x']], '* d')
97
-
98
- # codebook_indices = self.kmeans.predict(embed.cpu().detach().numpy())
99
-
100
- codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device) # .long()
101
-
102
- if flatten:
103
- return codebook_indices
104
-
105
- codebook_indices, = unpack(codebook_indices, packed_shape, '*')
106
- return codebook_indices
 
spaces/Bart92/RVC_HF/diffq/base.py DELETED
@@ -1,262 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from dataclasses import dataclass
8
- from concurrent import futures
9
- from fnmatch import fnmatch
10
- from functools import partial
11
- import io
12
- import math
13
- from multiprocessing import cpu_count
14
- import typing as tp
15
- import zlib
16
-
17
- import torch
18
-
19
-
20
- class BaseQuantizer:
21
- @dataclass
22
- class _QuantizedParam:
23
- name: str
24
- param: torch.nn.Parameter
25
- module: torch.nn.Module
26
- # If a Parameter is used multiple times, `other` can be used
27
- # to share state between the different Quantizers
28
- other: tp.Optional[tp.Any]
29
-
30
- def __init__(self, model: torch.nn.Module, min_size: float = 0.01, float16: bool = False,
31
- exclude: tp.Optional[tp.List[str]] = [], detect_bound: bool = True):
32
- self.model = model
33
- self.min_size = min_size
34
- self.float16 = float16
35
- self.exclude = exclude
36
- self.detect_bound = detect_bound
37
- self._quantized = False
38
- self._pre_handle = self.model.register_forward_pre_hook(self._forward_pre_hook)
39
- self._post_handle = self.model.register_forward_hook(self._forward_hook)
40
-
41
- self._quantized_state = None
42
- self._qparams = []
43
- self._float16 = []
44
- self._others = []
45
- self._rnns = []
46
-
47
- self._saved = []
48
-
49
- self._find_params()
50
-
51
- def _find_params(self):
52
- min_params = self.min_size * 2**20 // 4
53
- previous = {}
54
- for module_name, module in self.model.named_modules():
55
- if isinstance(module, torch.nn.RNNBase):
56
- self._rnns.append(module)
57
- for name, param in list(module.named_parameters(recurse=False)):
58
- full_name = f"{module_name}.{name}"
59
- matched = False
60
- for pattern in self.exclude:
61
- if fnmatch(full_name, pattern) or fnmatch(name, pattern):
62
- matched = True
63
- break
64
-
65
- if param.numel() <= min_params or matched:
66
- if id(param) in previous:
67
- continue
68
- if self.detect_bound:
69
- previous[id(param)] = None
70
- if self.float16:
71
- self._float16.append(param)
72
- else:
73
- self._others.append(param)
74
- else:
75
- qparam = self._register_param(name, param, module, previous.get(id(param)))
76
- if self.detect_bound:
77
- previous[id(param)] = qparam
78
- self._qparams.append(qparam)
79
-
80
- def _register_param(self, name, param, module, other):
81
- return self.__class__._QuantizedParam(name, param, module, other)
82
-
83
- def _forward_pre_hook(self, module, input):
84
- if self.model.training:
85
- self._quantized_state = None
86
- if self._quantized:
87
- self.unquantize()
88
- if self._pre_forward_train():
89
- self._fix_rnns()
90
- else:
91
- self.quantize()
92
-
93
- def _forward_hook(self, module, input, output):
94
- if self.model.training:
95
- if self._post_forward_train():
96
- self._fix_rnns(flatten=False) # Hacky, next forward will flatten
97
-
98
- def quantize(self, save=True):
99
- """
100
- Immediately apply quantization to the model parameters.
101
- If `save` is True, save a copy of the unquantized parameters, that can be
102
- restored with `unquantize()`.
103
- """
104
- if self._quantized:
105
- return
106
- if save:
107
- self._saved = [qp.param.data.to('cpu', copy=True)
108
- for qp in self._qparams if qp.other is None]
109
- self.restore_quantized_state(self.get_quantized_state())
110
- self._quantized = True
111
- self._fix_rnns()
112
-
113
- def unquantize(self):
114
- """
115
- Revert a previous call to `quantize()`.
116
- """
117
- if not self._quantized:
118
- raise RuntimeError("Can only be called on a quantized model.")
119
- if not self._saved:
120
- raise RuntimeError("Nothing to restore.")
121
- for qparam in self._qparams:
122
- if qparam.other is None:
123
- qparam.param.data[:] = self._saved.pop(0)
124
- assert len(self._saved) == 0
125
- self._quantized = False
126
- self._fix_rnns()
127
-
128
- def _pre_forward_train(self) -> bool:
129
- """
130
- Called once before each forward for continuous quantization.
131
- Should return True if parameters were changed.
132
- """
133
- return False
134
-
135
- def _post_forward_train(self) -> bool:
136
- """
137
- Called once after each forward (to restore state for instance).
138
- Should return True if parameters were changed.
139
- """
140
- return False
141
-
142
- def _fix_rnns(self, flatten=True):
143
- """
144
- To be called after quantization happened to fix RNNs.
145
- """
146
- for rnn in self._rnns:
147
- rnn._flat_weights = [
148
- (lambda wn: getattr(rnn, wn) if hasattr(rnn, wn) else None)(wn)
149
- for wn in rnn._flat_weights_names]
150
- if flatten:
151
- rnn.flatten_parameters()
152
-
153
- def get_quantized_state(self):
154
- """
155
- Returns sufficient quantized information to rebuild the model state.
156
-
157
- ..Note::
158
- To achieve maximum compression, you should compress this with
159
- gzip or other, as quantized weights are not optimally coded!
160
- """
161
- if self._quantized_state is None:
162
- self._quantized_state = self._get_quantized_state()
163
- return self._quantized_state
164
-
165
- def _get_quantized_state(self):
166
- """
167
- Actual implementation for `get_quantized_state`.
168
- """
169
- float16_params = []
170
- for p in self._float16:
171
- q = p.data.half()
172
- float16_params.append(q)
173
-
174
- return {
175
- "quantized": [self._quantize_param(qparam) for qparam in self._qparams
176
- if qparam.other is None],
177
- "float16": float16_params,
178
- "others": [p.data.clone() for p in self._others],
179
- }
180
-
181
- def _quantize_param(self, qparam: _QuantizedParam) -> tp.Any:
182
- """
183
- To be overriden.
184
- """
185
- raise NotImplementedError()
186
-
187
- def _unquantize_param(self, qparam: _QuantizedParam, quantized: tp.Any) -> torch.Tensor:
188
- """
189
- To be overriden.
190
- """
191
- raise NotImplementedError()
192
-
193
- def restore_quantized_state(self, state) -> None:
194
- """
195
- Restore the state of the model from the quantized state.
196
- """
197
- for p, q in zip(self._float16, state["float16"]):
198
- p.data[:] = q.to(p)
199
-
200
- for p, q in zip(self._others, state["others"]):
201
- p.data[:] = q
202
-
203
- remaining = list(state["quantized"])
204
- for qparam in self._qparams:
205
- if qparam.other is not None:
206
- # Only unquantize first appearance of nn.Parameter.
207
- continue
208
- quantized = remaining.pop(0)
209
- qparam.param.data[:] = self._unquantize_param(qparam, quantized)
210
- self._fix_rnns()
211
-
212
- def detach(self) -> None:
213
- """
214
- Detach from the model, removes hooks and anything else.
215
- """
216
- self._pre_handle.remove()
217
- self._post_handle.remove()
218
-
219
- def model_size(self) -> torch.Tensor:
220
- """
221
- Returns an estimate of the quantized model size.
222
- """
223
- total = torch.tensor(0.)
224
- for p in self._float16:
225
- total += 16 * p.numel()
226
- for p in self._others:
227
- total += 32 * p.numel()
228
- return total / 2**20 / 8 # bits to MegaBytes
229
-
230
- def true_model_size(self) -> float:
231
- """
232
- Return the true quantized model size, in MB, without extra
233
- compression.
234
- """
235
- return self.model_size().item()
236
-
237
- def compressed_model_size(self, compress_level=-1, num_workers=8) -> float:
238
- """
239
- Return the compressed quantized model size, in MB.
240
-
241
- Args:
242
- compress_level (int): compression level used with zlib,
243
- see `zlib.compress` for details.
244
- num_workers (int): will split the final big byte representation in that
245
- many chunks processed in parallels.
246
- """
247
- out = io.BytesIO()
248
- torch.save(self.get_quantized_state(), out)
249
- ms = _parallel_compress_len(out.getvalue(), compress_level, num_workers)
250
- return ms / 2 ** 20
251
-
252
-
253
- def _compress_len(data, compress_level):
254
- return len(zlib.compress(data, level=compress_level))
255
-
256
-
257
- def _parallel_compress_len(data, compress_level, num_workers):
258
- num_workers = min(cpu_count(), num_workers)
259
- chunk_size = int(math.ceil(len(data) / num_workers))
260
- chunks = [data[offset:offset + chunk_size] for offset in range(0, len(data), chunk_size)]
261
- with futures.ProcessPoolExecutor(num_workers) as pool:
262
- return sum(pool.map(partial(_compress_len, compress_level=compress_level), chunks))
 
spaces/Benson/text-generation/Examples/Barikad Crew Album Goumen Pou Saw Kwe Mp3 Download.md DELETED
@@ -1,29 +0,0 @@
1
-
2
- <h1>Barikad Crew Album Goumen Pou Saw Kwe Mp3 Download</h1>
3
- <p>If you are looking for some authentic and powerful Haitian rap music, you may want to check out Barikad Crew, one of the most popular and influential Kreyòl rap groups in Haiti. Their album Goumen Pou Saw Kwè, which means "fight for what you believe in", is a masterpiece of social commentary, cultural expression, and musical creativity. In this article, we will explore the history, the music, and the download options for this amazing album.</p>
4
- <h2>barikad crew album goumen pou saw kwe mp3 download</h2><br /><p><b><b>Download File</b> &#9913; <a href="https://bltlly.com/2v6L7I">https://bltlly.com/2v6L7I</a></b></p><br /><br />
5
- <h2>History of Barikad Crew</h2>
6
- <p>Barikad Crew was founded in 2002 by three rappers from the Bas Peu d'Chose neighborhood of Port-au-Prince: Papa K-tafalk, Deja-Voo, and Kondagana. They invited several other rappers from different underground groups to join them and form a collective that would represent the voice of the marginalized neighborhoods. Their name comes from the barricades they used to block the streets during protests and demonstrations.</p>
7
- <p>The group rose to fame in 2005 when they took third place in a Christmas song contest on Telemax with their song "Nwel Pa'm". They also released their first single "Bay Hip Hop Bourad" in 2004, followed by "Kijan'l Te Ye" in 2005 and "Trip N'ap Trip" in 2006. Their debut album Goumen Pou Saw Kwè was released in 2007 and met with critical acclaim and commercial success. The album features 21 tracks covering a range of topics such as politics, social issues, religion, love, and identity.</p>
8
-
9
- <h2>Music of Barikad Crew</h2>
10
- <p>Barikad Crew's music is characterized by its fusion of African rhythms, Caribbean melodies, and hip hop beats. The group uses a variety of instruments such as drums, horns, guitars, keyboards, and turntables to create a rich and diverse sound. The group also incorporates elements of other Haitian music genres such as kompa, rara, twoubadou, merengue, and vodou drumming.</p>
11
- <p>Barikad Crew's lyrics are mainly in Haitian Creole, the most widely spoken language in Haiti. The group uses its words to express its opinions, emotions, experiences, and aspirations. They also use metaphors, humor, sarcasm, and wordplay to get their messages across. The group tackles topics such as poverty, corruption, violence, injustice, oppression, resistance, hope, faith, pride, solidarity, and patriotism.</p>
12
- <p></p>
13
- <p>Barikad Crew's music is not only entertaining but also educational and inspiring. The group aims to raise awareness of the realities and challenges facing the Haitian people and to encourage them to fight for their rights and dignity. The group also celebrates the beauty and diversity of Haiti's culture and history and pays tribute to its ancestors and heroes.</p>
14
- <h2>Download options</h2>
15
- <p>If you want to listen to Barikad Crew's album Goumen Pou Saw Kwè or any of their other albums, you have several options for downloading them online. You can buy them from platforms such as iTunes, Amazon, or CD Baby. You can also stream them on platforms such as Spotify, YouTube, or TIDAL. You can also download their songs for free from their official website or from other websites that offer free mp3 downloads. However, we recommend that you support the artists by buying their albums or streaming their music legally.</p>
16
- <h2>Conclusion</h2>
17
-
18
- <h2>Frequently asked questions</h2>
19
- <h3>What does Barikad Crew mean?</h3>
20
- <p>Barikad Crew means barricade crew. The name comes from the barricades the group used to block the streets during protests and demonstrations. The name also symbolizes their resistance to oppression and injustice and their defense of their rights and dignity.</p>
21
- <h3>How many members are in Barikad Crew?</h3>
22
- <p>The original Barikad Crew had 13 members: Papa K-tafalk, Deja-Voo, Kondagana, Dade, Young Cliff, Fantom, Bricks, Izolan, Marco, Brital, Manno Beats, Master Sun, and Bafon Plim. However, four of them died in tragic incidents: K-tafalk, Deja-Voo, Dade, and Young Cliff. The current Barikad Crew has nine members: Fantom, Bricks, Izolan, Marco, Brital, Manno Beats, Master Sun, Bafon Plim, and Drz.</p>
23
- <h3>What are some of the best songs by Barikad Crew?</h3>
24
- <p>Some of the best songs by barikad crew are: "Goumen Pou Saw Kwè", "Toup Pou Yo", "Trip N'ap Trip", "Kijan'l Te Ye", "Bay Hip Hop Bourad", "Tòf", "Travay", "Tann Jou Pa'w", "Jiskobou", and "Men Flow". </p>
25
- <h3>What are some of the awards and honors Barikad Crew has received?</h3>
26
- <p>Some of the awards and honors Barikad Crew has received are: Best Rap Group at the Haiti Music Awards in 2007 and 2008; Best Rap Album for Goumen Pou Saw Kwè at the Haiti Music Awards in 2008; Best Rap Album for RED at the Haiti Music Awards in 2013; Best Rap Album for Toutouni at the Haiti Music Awards in 2016; Best Rap Album for Travay at the Haiti Music Awards in 2017; Best Rap Album for Tann Jou Pa'w at the Haiti Music Awards in 2018; Best Rap Album for TOF at the Haiti Music Awards in 2019; and Best Rap Group at the Caribbean Music Awards in 2019.</p>
27
- <h3>Where can I find more information about Barikad Crew?</h3> 64aa2da5cf<br />
28
- <br />
29
- <br />
 
spaces/Benson/text-generation/Examples/Cmo Descargar Sims En Sims 3.md DELETED
@@ -1,154 +0,0 @@
1
- <br />
2
- <h1>How to Download Sims in Sims 3</h1>
3
- <p>If you are a fan of The Sims series, you probably know that one of the most fun parts of the game is creating and customizing your own Sims. But did you know that you can also download Sims that other players have made and use them in your game? In this article, we will show you how to download Sims in Sims 3 from different sources, such as the official website, YouTube videos, and other websites. We will also explain how to install them and what the system requirements and features of Sims 3 are.</p>
4
- <h2>Introduction</h2>
5
- <h3>What is Sims 3 and why download Sims?</h3>
6
- <p>The Sims 3 is the third main title in The Sims series of life simulation games. It was released in 2009 for Windows, Mac, and several consoles and mobile devices. The game lets you create and control virtual people called "Sims" in an open-world environment. You can customize their appearance, personality, skills, relationships, careers, hobbies, and more. You can also build and decorate their homes, community lots, and neighborhoods.</p>
7
- <h2>cómo descargar sims en sims 3</h2><br /><p><b><b>DOWNLOAD</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://bltlly.com/2v6Mx2">https://bltlly.com/2v6Mx2</a></b></p><br /><br />
8
- <p>One of the reasons you might want to download Sims in Sims 3 is to add more variety and diversity to your game. You can find Sims that have different looks, styles, traits, stories, and backgrounds than the ones you create yourself. You can also find Sims inspired by celebrities, fictional characters, or real people. Downloading Sims can help you fill your world with interesting and unique characters you can interact with, befriend, or even romance.</p>
9
- <h3>What are the system requirements and features of Sims 3?</h3>
10
- <p>Before downloading Sims in Sims 3, you should make sure your computer or device meets the minimum system requirements for the game. According to [EA Help]( 9 ), these are:</p>
11
- <ul>
12
- <li>OS: Windows XP (Service Pack 2) or Windows Vista (Service Pack 1)</li>
13
-
14
- <li>Memory: (XP) 1 GB RAM; (Vista) 1.5 GB RAM</li>
15
- <li>Graphics: 128 MB video card with Pixel Shader 2.0 support</li>
16
- <li>DirectX: Version 9.0c</li>
17
- <li>Storage: At least 6.5 GB of hard drive space with at least 1 GB of additional space for custom content</li>
18
- </ul>
19
- <p>If you have a Mac, you need:</p>
20
- <ul>
21
- <li>OS: Mac OS X 10.5.7 Leopard or higher</li>
22
- <li>Processor: Intel Core Duo processor</li>
23
- <li>Memory: 2 GB RAM</li>
24
- <li>Graphics: ATI X1600 or Nvidia 7300 GT with 128 MB of video RAM, or integrated Intel GMA X3100</li>
25
- <li>Storage: At least 6.1 GB of hard drive space with at least 1 GB of additional space for custom content</li>
26
- </ul>
27
- <p>The Sims 3 has many features that make it a great game for downloading Sims. Some of these features are:</p>
28
- <ul>
29
- <li>An open-world concept that lets you explore an entire town without loading screens</li>
30
- <li>A Create a Sim mode that lets you customize every detail of your Sims, from their facial features, body shape, skin tone, hair color, clothes, accessories, tattoos, and more</li>
31
- <li>A Create a Style tool that lets you change the color and pattern of any object in the game</li>
32
- <li>A trait system that gives your Sims unique personalities and behaviors</li>
33
- <li>A wishes and lifetime happiness system that motivates your Sims to pursue their goals and dreams</li>
34
- <li>A skill system that lets your Sims learn and improve various skills, such as cooking, painting, gardening, writing, and more</li>
35
- <li>A career system that offers your Sims different job opportunities and paths, such as journalism, law enforcement, medicine, music, and more</li>
36
- <li>A social system that lets your Sims interact with other Sims in various ways, such as chatting, flirting, fighting, joking, and more</li>
37
- <li>A family system that lets your Sims form relationships, get married, have children, and grow old</li>
38
-
39
- </ul>
40
- <h2>How to download Sims from different sources</h2>
41
- <p>Now that you know what Sims 3 is and what it can offer you, you may be wondering how to download Sims in Sims 3. There are many sources where you can find Sims to download for your game. Some of the most popular are:</p>
42
- <ul>
43
- <li>The official The Sims 3 website</li>
44
- <li>YouTube videos</li>
45
- <li>Other websites</li>
46
- </ul>
47
- <p>We will explain how to download Sims from each of these sources in the following sections.</p>
48
- <h3>How to download Sims from the official website</h3>
49
- <p>The official The Sims 3 website is [thesims3.com]. This is where you can buy the game and its expansions, register your game and redeem your code, access your account and profile, browse the online store and the exchange, and download worlds and bonus content. You can also find pre-made Sims created by the developers or other players. To download Sims from the official website, you need to follow these steps:</p>
50
- <p></p>
51
- <h4>How to register your game and redeem your code</h4>
52
- <p>If you bought the game on a physical disc or from a digital download service other than Origin (such as Steam), you need to register your game and redeem your code on the official website. This will give you access to the game's online features and let you download content from the store and the exchange. To do this, you need to:</p>
53
- <ol>
54
- <li>Create an EA account or log in with an existing one</li>
55
- <li>Go to [thesims3.com/registeragame] and enter the 20-digit serial code that came with your game or was emailed to you</li>
56
- <li>Click "Register" and confirm your registration</li>
57
- <li>Go to [thesims3.com/myaccount] and click "Redeem a code"</li>
58
- <li>Enter the 16-digit code that came with your game or was emailed to you</li>
59
- <li>Click "Redeem" and confirm your redemption</li>
60
- <li>Your game will be added to your Origin library and you can download it from there</li>
61
- </ol>
62
- <h4>How to install your game and expansions from Origin</h4>
63
- <p>If you bought the game on Origin or registered it on the official website, you can install it from Origin. Origin is a digital distribution platform that lets you buy, download, and play EA games. To install your game and expansions on Origin, you need to:</p>
64
- <ol>
65
- <li>Create an Origin account or log in with an existing one</li>
66
- <li>Download and install Origin from [origin.com]</li>
67
- <li>Launch Origin and go to "My Game Library"</li>
68
- <li>Find The Sims 3 and click "Download"</li>
69
- <li>Wait for the download and installation to finish</li>
70
- <li>Repeat steps 4-5 for any expansions or stuff packs you have bought or redeemed</li>
71
- <li>You can launch the game from Origin or from your desktop shortcut</li>
72
- </ol>
73
- <h4>How to download and install worlds and bonus content from thesims3.com</h4>
74
- <p>The official The Sims 3 website also offers some free worlds and extra content that you can download for your game. These include Riverview, Barnacle Bay, Hidden Springs, Lunar Lakes, Lucky Palms, Sunlit Tides, Monte Vista, Dragon Valley, Midnight Hollow, Roaring Heights, and some additional items and sets. To download and install these worlds and bonus content, you need to:</p>
75
- <ol>
76
- <li>Go to [thesims3.com/worlds] and choose the world you want to download</li>
77
- <li>Click "Download" and confirm your purchase (some worlds are free and some require SimPoints, which you can buy with real money)</li>
78
- <li>Go to [thesims3.com/myaccount] and click "Purchase History"</li>
79
- <li>Find the world you downloaded and click "Download"</li>
80
- <li>The launcher will open and install the world automatically</li>
81
- <li>Repeat steps 1-5 for any bonus content you want to download</li>
82
- <li>You can find the worlds and bonus content in your game under "New Game" or "Edit Town"</li>
83
- </ol>
84
- <h3>How to download Sims from YouTube videos</h3>
85
-
86
- <h4>How to find and download pre-made Sims from YouTube channels</h4>
87
- <p>To find and download pre-made Sims from YouTube channels, you need to:</p>
88
- <ol>
89
- <li>Search for "Sims 3 download" or similar keywords on YouTube</li>
90
- <li>Browse the results and find a video that shows a Sim you like</li>
91
- <li>Watch the video and pay attention to the description and the comments</li>
92
- <li>Look for a link to download the Sim or the custom content used for the Sim</li>
93
- <li>Click the link and follow the instructions to download the file (usually a .sim or a .zip file)</li>
94
- <li>Save the file to your computer</li>
95
- </ol>
96
- <h4>How to install pre-made Sims using the launcher or the mods folder</h4>
97
- <p>To install pre-made Sims using the launcher or the mods folder, you need to:</p>
98
- <ol>
99
- <li>If the file is a .sim file, copy it to Documents Electronic Arts The Sims 3 SavedSims</li>
100
- <li>If the file is a .zip file, extract it using a program like WinRAR or 7-Zip</li>
101
- <li>If the extracted file is a .sim file, copy it to Documents Electronic Arts The Sims 3 SavedSims</li>
102
- <li>If the extracted file is a .package file, copy it to Documents Electronic Arts The Sims 3 Mods Packages (if you do not have a Mods folder, you can create one by following [this guide])</li>
103
- <li>Launch the game and go to Create a Sim mode</li>
104
- <li>Click "Pre-made Sims" and find the Sim you downloaded</li>
105
- <li>Customize the Sim as you like and add it to your household</li>
106
- </ol>
107
- <h3>How to download Sims from other websites</h3>
108
- <p>The last source where you can find Sims to download for your game is other websites. There are many fan sites and forums that create and share custom Sims for Sims 3. These Sims usually come with custom content, such as hairstyles, clothes, makeup, accessories, etc. To download Sims from other websites, you need to follow these steps:</p>
109
- <h4>How to find and download custom Sims from fan sites and forums</h4>
110
-
111
- <ol>
112
- <li>Search for "Sims 3 custom sims" or similar keywords on Google or your preferred search engine</li>
113
- <li>Browse the results and find a website that offers custom Sims for download (some examples are [Mod The Sims], [The Sims Resource], [Sims 3 Updates], etc.)</li>
114
- <li>Browse the website and find a Sim you like</li>
115
- <li>Click the Sim's name or image and read the description and the comments</li>
116
- <li>Look for a link or a button to download the Sim or the custom content used for the Sim (usually a .sim or a .zip file)</li>
117
- <li>Click the link or button and follow the instructions to download the file (you may have to register or log in on some websites)</li>
118
- <li>Save the file to your computer</li>
119
- </ol>
120
- <h4>How to install custom Sims using the launcher or the mods folder</h4>
121
- <p>To install custom Sims using the launcher or the mods folder, you need to follow the same steps as in the previous section.</p> <p>Now you know how to download Sims in Sims 3 from different sources and how to install them in your game. You can enjoy playing with the Sims you downloaded or create your own Sims using the game's tools and features. You can also share your Sims with other players on the official website, YouTube, or other websites.</p>
122
- <h2>Conclusion</h2>
123
- <h3>Summary of the main points and tips</h3>
124
- <p>In this article, we have covered the following topics:</p>
125
- <ul>
126
- <li>What is Sims 3 and why download Sims?</li>
127
- <li>What are the system requirements and features of Sims 3?</li>
128
- <li>How to download Sims from the official website, YouTube videos, and other websites</li>
129
- <li>How to install Sims using the launcher or the mods folder</li>
130
- </ul>
131
- <p>Here are some tips to keep in mind when downloading Sims in Sims 3:</p>
132
- <ul>
133
- <li>Always check the description and comments of the Sim you want to download for instructions, warnings, or recommendations</li>
134
- <li>Always back up your game files before installing any custom content</li>
135
-
136
- <li>Always give credit to the original creators of the Sims or custom content you use or share</li>
137
- <li>Always have fun and be creative with your Sims</li>
138
- </ul>
139
- <h3>Frequently asked questions</h3>
140
- <p>Here are some frequently asked questions about downloading Sims in Sims 3:</p>
141
- <ol>
142
- <li><b>How do I uninstall a Sim or custom content I have downloaded?</b></li>
143
- <p>You can uninstall a Sim or custom content you downloaded by deleting the file from your SavedSims or Mods Packages folder. You can also use the launcher to uninstall it by going to "Installed Content" and clicking "Uninstall".</p>
144
- <li><b>How do I update my game and my custom content?</b></li>
145
- <p>You can update your game and your custom content using Origin or the launcher. Origin will automatically update your game and your expansions when you launch it. The launcher will check for updates when you start it and prompt you to download them. You can also update your custom content by visiting the websites where you downloaded it and looking for new versions.</p>
146
- <li><b>How do I fix a broken or corrupted Sim or custom content?</b></li>
147
- <p>You can fix a broken or corrupted Sim or custom content using a tool like [Delphy's Dashboard] or [Custard]. These tools can scan your game files and identify any problems or conflicts with your custom content. You can then delete or fix the problematic files.</p>
148
- <li><b>How can I find more Sims or custom content to download?</b></li>
149
- <p>You can find more Sims or custom content to download by searching online or browsing different websites. Some of the most popular websites for downloading Sims or custom content are [Mod The Sims], [The Sims Resource], [Sims 3 Updates], [Around The Sims 3], [Lorandia Sims 3], [Parsimonious], [Anubis Under The Sun], [All About Style], [Liana Sims 3], [XM Sims 3], etc.</p>
150
- <li><b>How do I make my own Sims or custom content?</b></li>
151
-
152
- </ol></p> 64aa2da5cf<br />
153
- <br />
154
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/_structures.py DELETED
@@ -1,61 +0,0 @@
1
- # This file is dual licensed under the terms of the Apache License, Version
2
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
- # for complete details.
4
-
5
-
6
- class InfinityType:
7
- def __repr__(self) -> str:
8
- return "Infinity"
9
-
10
- def __hash__(self) -> int:
11
- return hash(repr(self))
12
-
13
- def __lt__(self, other: object) -> bool:
14
- return False
15
-
16
- def __le__(self, other: object) -> bool:
17
- return False
18
-
19
- def __eq__(self, other: object) -> bool:
20
- return isinstance(other, self.__class__)
21
-
22
- def __gt__(self, other: object) -> bool:
23
- return True
24
-
25
- def __ge__(self, other: object) -> bool:
26
- return True
27
-
28
- def __neg__(self: object) -> "NegativeInfinityType":
29
- return NegativeInfinity
30
-
31
-
32
- Infinity = InfinityType()
33
-
34
-
35
- class NegativeInfinityType:
36
- def __repr__(self) -> str:
37
- return "-Infinity"
38
-
39
- def __hash__(self) -> int:
40
- return hash(repr(self))
41
-
42
- def __lt__(self, other: object) -> bool:
43
- return True
44
-
45
- def __le__(self, other: object) -> bool:
46
- return True
47
-
48
- def __eq__(self, other: object) -> bool:
49
- return isinstance(other, self.__class__)
50
-
51
- def __gt__(self, other: object) -> bool:
52
- return False
53
-
54
- def __ge__(self, other: object) -> bool:
55
- return False
56
-
57
- def __neg__(self: object) -> InfinityType:
58
- return Infinity
59
-
60
-
61
- NegativeInfinity = NegativeInfinityType()
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/egg_info.py DELETED
@@ -1,763 +0,0 @@
1
- """setuptools.command.egg_info
2
-
3
- Create a distribution's .egg-info directory and contents"""
4
-
5
- from distutils.filelist import FileList as _FileList
6
- from distutils.errors import DistutilsInternalError
7
- from distutils.util import convert_path
8
- from distutils import log
9
- import distutils.errors
10
- import distutils.filelist
11
- import functools
12
- import os
13
- import re
14
- import sys
15
- import io
16
- import warnings
17
- import time
18
- import collections
19
-
20
- from .._importlib import metadata
21
- from .. import _entry_points
22
-
23
- from setuptools import Command
24
- from setuptools.command.sdist import sdist
25
- from setuptools.command.sdist import walk_revctrl
26
- from setuptools.command.setopt import edit_config
27
- from setuptools.command import bdist_egg
28
- from pkg_resources import (
29
- Requirement, safe_name, parse_version,
30
- safe_version, to_filename)
31
- import setuptools.unicode_utils as unicode_utils
32
- from setuptools.glob import glob
33
-
34
- from setuptools.extern import packaging
35
- from setuptools.extern.jaraco.text import yield_lines
36
- from setuptools import SetuptoolsDeprecationWarning
37
-
38
-
39
- def translate_pattern(glob): # noqa: C901 # is too complex (14) # FIXME
40
- """
41
- Translate a file path glob like '*.txt' in to a regular expression.
42
- This differs from fnmatch.translate which allows wildcards to match
43
- directory separators. It also knows about '**/' which matches any number of
44
- directories.
45
- """
46
- pat = ''
47
-
48
- # This will split on '/' within [character classes]. This is deliberate.
49
- chunks = glob.split(os.path.sep)
50
-
51
- sep = re.escape(os.sep)
52
- valid_char = '[^%s]' % (sep,)
53
-
54
- for c, chunk in enumerate(chunks):
55
- last_chunk = c == len(chunks) - 1
56
-
57
- # Chunks that are a literal ** are globstars. They match anything.
58
- if chunk == '**':
59
- if last_chunk:
60
- # Match anything if this is the last component
61
- pat += '.*'
62
- else:
63
- # Match '(name/)*'
64
- pat += '(?:%s+%s)*' % (valid_char, sep)
65
- continue # Break here as the whole path component has been handled
66
-
67
- # Find any special characters in the remainder
68
- i = 0
69
- chunk_len = len(chunk)
70
- while i < chunk_len:
71
- char = chunk[i]
72
- if char == '*':
73
- # Match any number of name characters
74
- pat += valid_char + '*'
75
- elif char == '?':
76
- # Match a name character
77
- pat += valid_char
78
- elif char == '[':
79
- # Character class
80
- inner_i = i + 1
81
- # Skip initial !/] chars
82
- if inner_i < chunk_len and chunk[inner_i] == '!':
83
- inner_i = inner_i + 1
84
- if inner_i < chunk_len and chunk[inner_i] == ']':
85
- inner_i = inner_i + 1
86
-
87
- # Loop till the closing ] is found
88
- while inner_i < chunk_len and chunk[inner_i] != ']':
89
- inner_i = inner_i + 1
90
-
91
- if inner_i >= chunk_len:
92
- # Got to the end of the string without finding a closing ]
93
- # Do not treat this as a matching group, but as a literal [
94
- pat += re.escape(char)
95
- else:
96
- # Grab the insides of the [brackets]
97
- inner = chunk[i + 1:inner_i]
98
- char_class = ''
99
-
100
- # Class negation
101
- if inner[0] == '!':
102
- char_class = '^'
103
- inner = inner[1:]
104
-
105
- char_class += re.escape(inner)
106
- pat += '[%s]' % (char_class,)
107
-
108
- # Skip to the end ]
109
- i = inner_i
110
- else:
111
- pat += re.escape(char)
112
- i += 1
113
-
114
- # Join each chunk with the dir separator
115
- if not last_chunk:
116
- pat += sep
117
-
118
- pat += r'\Z'
119
- return re.compile(pat, flags=re.MULTILINE | re.DOTALL)
120
-
121
-
122
- class InfoCommon:
123
- tag_build = None
124
- tag_date = None
125
-
126
- @property
127
- def name(self):
128
- return safe_name(self.distribution.get_name())
129
-
130
- def tagged_version(self):
131
- return safe_version(self._maybe_tag(self.distribution.get_version()))
132
-
133
- def _maybe_tag(self, version):
134
- """
135
- egg_info may be called more than once for a distribution,
136
- in which case the version string already contains all tags.
137
- """
138
- return (
139
- version if self.vtags and self._already_tagged(version)
140
- else version + self.vtags
141
- )
142
-
143
- def _already_tagged(self, version: str) -> bool:
144
- # Depending on their format, tags may change with version normalization.
145
- # So in addition the regular tags, we have to search for the normalized ones.
146
- return version.endswith(self.vtags) or version.endswith(self._safe_tags())
147
-
148
- def _safe_tags(self) -> str:
149
- # To implement this we can rely on `safe_version` pretending to be version 0
150
- # followed by tags. Then we simply discard the starting 0 (fake version number)
151
- return safe_version(f"0{self.vtags}")[1:]
152
-
153
- def tags(self) -> str:
154
- version = ''
155
- if self.tag_build:
156
- version += self.tag_build
157
- if self.tag_date:
158
- version += time.strftime("-%Y%m%d")
159
- return version
160
- vtags = property(tags)
161
-
162
-
163
- class egg_info(InfoCommon, Command):
164
- description = "create a distribution's .egg-info directory"
165
-
166
- user_options = [
167
- ('egg-base=', 'e', "directory containing .egg-info directories"
168
- " (default: top of the source tree)"),
169
- ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
170
- ('tag-build=', 'b', "Specify explicit tag to add to version number"),
171
- ('no-date', 'D', "Don't include date stamp [default]"),
172
- ]
173
-
174
- boolean_options = ['tag-date']
175
- negative_opt = {
176
- 'no-date': 'tag-date',
177
- }
178
-
179
- def initialize_options(self):
180
- self.egg_base = None
181
- self.egg_name = None
182
- self.egg_info = None
183
- self.egg_version = None
184
- self.broken_egg_info = False
185
- self.ignore_egg_info_in_manifest = False
186
-
187
- ####################################
188
- # allow the 'tag_svn_revision' to be detected and
189
- # set, supporting sdists built on older Setuptools.
190
- @property
191
- def tag_svn_revision(self):
192
- pass
193
-
194
- @tag_svn_revision.setter
195
- def tag_svn_revision(self, value):
196
- pass
197
- ####################################
198
-
199
- def save_version_info(self, filename):
200
- """
201
- Materialize the value of date into the
202
- build tag. Install build keys in a deterministic order
203
- to avoid arbitrary reordering on subsequent builds.
204
- """
205
- egg_info = collections.OrderedDict()
206
- # follow the order these keys would have been added
207
- # when PYTHONHASHSEED=0
208
- egg_info['tag_build'] = self.tags()
209
- egg_info['tag_date'] = 0
210
- edit_config(filename, dict(egg_info=egg_info))
211
-
212
- def finalize_options(self):
213
- # Note: we need to capture the current value returned
214
- # by `self.tagged_version()`, so we can later update
215
- # `self.distribution.metadata.version` without
216
- # repercussions.
217
- self.egg_name = self.name
218
- self.egg_version = self.tagged_version()
219
- parsed_version = parse_version(self.egg_version)
220
-
221
- try:
222
- is_version = isinstance(parsed_version, packaging.version.Version)
223
- spec = "%s==%s" if is_version else "%s===%s"
224
- Requirement(spec % (self.egg_name, self.egg_version))
225
- except ValueError as e:
226
- raise distutils.errors.DistutilsOptionError(
227
- "Invalid distribution name or version syntax: %s-%s" %
228
- (self.egg_name, self.egg_version)
229
- ) from e
230
-
231
- if self.egg_base is None:
232
- dirs = self.distribution.package_dir
233
- self.egg_base = (dirs or {}).get('', os.curdir)
234
-
235
- self.ensure_dirname('egg_base')
236
- self.egg_info = to_filename(self.egg_name) + '.egg-info'
237
- if self.egg_base != os.curdir:
238
- self.egg_info = os.path.join(self.egg_base, self.egg_info)
239
- if '-' in self.egg_name:
240
- self.check_broken_egg_info()
241
-
242
- # Set package version for the benefit of dumber commands
243
- # (e.g. sdist, bdist_wininst, etc.)
244
- #
245
- self.distribution.metadata.version = self.egg_version
246
-
247
- # If we bootstrapped around the lack of a PKG-INFO, as might be the
248
- # case in a fresh checkout, make sure that any special tags get added
249
- # to the version info
250
- #
251
- pd = self.distribution._patched_dist
252
- if pd is not None and pd.key == self.egg_name.lower():
253
- pd._version = self.egg_version
254
- pd._parsed_version = parse_version(self.egg_version)
255
- self.distribution._patched_dist = None
256
-
257
- def write_or_delete_file(self, what, filename, data, force=False):
258
- """Write `data` to `filename` or delete if empty
259
-
260
- If `data` is non-empty, this routine is the same as ``write_file()``.
261
- If `data` is empty but not ``None``, this is the same as calling
262
- ``delete_file(filename)`. If `data` is ``None``, then this is a no-op
263
- unless `filename` exists, in which case a warning is issued about the
264
- orphaned file (if `force` is false), or deleted (if `force` is true).
265
- """
266
- if data:
267
- self.write_file(what, filename, data)
268
- elif os.path.exists(filename):
269
- if data is None and not force:
270
- log.warn(
271
- "%s not set in setup(), but %s exists", what, filename
272
- )
273
- return
274
- else:
275
- self.delete_file(filename)
276
-
277
- def write_file(self, what, filename, data):
278
- """Write `data` to `filename` (if not a dry run) after announcing it
279
-
280
- `what` is used in a log message to identify what is being written
281
- to the file.
282
- """
283
- log.info("writing %s to %s", what, filename)
284
- data = data.encode("utf-8")
285
- if not self.dry_run:
286
- f = open(filename, 'wb')
287
- f.write(data)
288
- f.close()
289
-
290
- def delete_file(self, filename):
291
- """Delete `filename` (if not a dry run) after announcing it"""
292
- log.info("deleting %s", filename)
293
- if not self.dry_run:
294
- os.unlink(filename)
295
-
296
- def run(self):
297
- self.mkpath(self.egg_info)
298
- os.utime(self.egg_info, None)
299
- for ep in metadata.entry_points(group='egg_info.writers'):
300
- writer = ep.load()
301
- writer(self, ep.name, os.path.join(self.egg_info, ep.name))
302
-
303
- # Get rid of native_libs.txt if it was put there by older bdist_egg
304
- nl = os.path.join(self.egg_info, "native_libs.txt")
305
- if os.path.exists(nl):
306
- self.delete_file(nl)
307
-
308
- self.find_sources()
309
-
310
- def find_sources(self):
311
- """Generate SOURCES.txt manifest file"""
312
- manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
313
- mm = manifest_maker(self.distribution)
314
- mm.ignore_egg_info_dir = self.ignore_egg_info_in_manifest
315
- mm.manifest = manifest_filename
316
- mm.run()
317
- self.filelist = mm.filelist
318
-
319
- def check_broken_egg_info(self):
320
- bei = self.egg_name + '.egg-info'
321
- if self.egg_base != os.curdir:
322
- bei = os.path.join(self.egg_base, bei)
323
- if os.path.exists(bei):
324
- log.warn(
325
- "-" * 78 + '\n'
326
- "Note: Your current .egg-info directory has a '-' in its name;"
327
- '\nthis will not work correctly with "setup.py develop".\n\n'
328
- 'Please rename %s to %s to correct this problem.\n' + '-' * 78,
329
- bei, self.egg_info
330
- )
331
- self.broken_egg_info = self.egg_info
332
- self.egg_info = bei # make it work for now
333
-
334
-
335
- class FileList(_FileList):
336
- # Implementations of the various MANIFEST.in commands
337
-
338
- def __init__(self, warn=None, debug_print=None, ignore_egg_info_dir=False):
339
- super().__init__(warn, debug_print)
340
- self.ignore_egg_info_dir = ignore_egg_info_dir
341
-
342
- def process_template_line(self, line):
343
- # Parse the line: split it up, make sure the right number of words
344
- # is there, and return the relevant words. 'action' is always
345
- # defined: it's the first word of the line. Which of the other
346
- # three are defined depends on the action; it'll be either
347
- # patterns, (dir and patterns), or (dir_pattern).
348
- (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
349
-
350
- action_map = {
351
- 'include': self.include,
352
- 'exclude': self.exclude,
353
- 'global-include': self.global_include,
354
- 'global-exclude': self.global_exclude,
355
- 'recursive-include': functools.partial(
356
- self.recursive_include, dir,
357
- ),
358
- 'recursive-exclude': functools.partial(
359
- self.recursive_exclude, dir,
360
- ),
361
- 'graft': self.graft,
362
- 'prune': self.prune,
363
- }
364
- log_map = {
365
- 'include': "warning: no files found matching '%s'",
366
- 'exclude': (
367
- "warning: no previously-included files found "
368
- "matching '%s'"
369
- ),
370
- 'global-include': (
371
- "warning: no files found matching '%s' "
372
- "anywhere in distribution"
373
- ),
374
- 'global-exclude': (
375
- "warning: no previously-included files matching "
376
- "'%s' found anywhere in distribution"
377
- ),
378
- 'recursive-include': (
379
- "warning: no files found matching '%s' "
380
- "under directory '%s'"
381
- ),
382
- 'recursive-exclude': (
383
- "warning: no previously-included files matching "
384
- "'%s' found under directory '%s'"
385
- ),
386
- 'graft': "warning: no directories found matching '%s'",
387
- 'prune': "no previously-included directories found matching '%s'",
388
- }
389
-
390
- try:
391
- process_action = action_map[action]
392
- except KeyError:
393
- raise DistutilsInternalError(
394
- "this cannot happen: invalid action '{action!s}'".
395
- format(action=action),
396
- )
397
-
398
- # OK, now we know that the action is valid and we have the
399
- # right number of words on the line for that action -- so we
400
- # can proceed with minimal error-checking.
401
-
402
- action_is_recursive = action.startswith('recursive-')
403
- if action in {'graft', 'prune'}:
404
- patterns = [dir_pattern]
405
- extra_log_args = (dir, ) if action_is_recursive else ()
406
- log_tmpl = log_map[action]
407
-
408
- self.debug_print(
409
- ' '.join(
410
- [action] +
411
- ([dir] if action_is_recursive else []) +
412
- patterns,
413
- )
414
- )
415
- for pattern in patterns:
416
- if not process_action(pattern):
417
- log.warn(log_tmpl, pattern, *extra_log_args)
418
-
419
- def _remove_files(self, predicate):
420
- """
421
- Remove all files from the file list that match the predicate.
422
- Return True if any matching files were removed
423
- """
424
- found = False
425
- for i in range(len(self.files) - 1, -1, -1):
426
- if predicate(self.files[i]):
427
- self.debug_print(" removing " + self.files[i])
428
- del self.files[i]
429
- found = True
430
- return found
431
-
432
- def include(self, pattern):
433
- """Include files that match 'pattern'."""
434
- found = [f for f in glob(pattern) if not os.path.isdir(f)]
435
- self.extend(found)
436
- return bool(found)
437
-
438
- def exclude(self, pattern):
439
- """Exclude files that match 'pattern'."""
440
- match = translate_pattern(pattern)
441
- return self._remove_files(match.match)
442
-
443
- def recursive_include(self, dir, pattern):
444
- """
445
- Include all files anywhere in 'dir/' that match the pattern.
446
- """
447
- full_pattern = os.path.join(dir, '**', pattern)
448
- found = [f for f in glob(full_pattern, recursive=True)
449
- if not os.path.isdir(f)]
450
- self.extend(found)
451
- return bool(found)
452
-
453
- def recursive_exclude(self, dir, pattern):
454
- """
455
- Exclude any file anywhere in 'dir/' that match the pattern.
456
- """
457
- match = translate_pattern(os.path.join(dir, '**', pattern))
458
- return self._remove_files(match.match)
459
-
460
- def graft(self, dir):
461
- """Include all files from 'dir/'."""
462
- found = [
463
- item
464
- for match_dir in glob(dir)
465
- for item in distutils.filelist.findall(match_dir)
466
- ]
467
- self.extend(found)
468
- return bool(found)
469
-
470
- def prune(self, dir):
471
- """Filter out files from 'dir/'."""
472
- match = translate_pattern(os.path.join(dir, '**'))
473
- return self._remove_files(match.match)
474
-
475
- def global_include(self, pattern):
476
- """
477
- Include all files anywhere in the current directory that match the
478
- pattern. This is very inefficient on large file trees.
479
- """
480
- if self.allfiles is None:
481
- self.findall()
482
- match = translate_pattern(os.path.join('**', pattern))
483
- found = [f for f in self.allfiles if match.match(f)]
484
- self.extend(found)
485
- return bool(found)
486
-
487
- def global_exclude(self, pattern):
488
- """
489
- Exclude all files anywhere that match the pattern.
490
- """
491
- match = translate_pattern(os.path.join('**', pattern))
492
- return self._remove_files(match.match)
493
-
494
- def append(self, item):
495
- if item.endswith('\r'): # Fix older sdists built on Windows
496
- item = item[:-1]
497
- path = convert_path(item)
498
-
499
- if self._safe_path(path):
500
- self.files.append(path)
501
-
502
- def extend(self, paths):
503
- self.files.extend(filter(self._safe_path, paths))
504
-
505
- def _repair(self):
506
- """
507
- Replace self.files with only safe paths
508
-
509
- Because some owners of FileList manipulate the underlying
510
- ``files`` attribute directly, this method must be called to
511
- repair those paths.
512
- """
513
- self.files = list(filter(self._safe_path, self.files))
514
-
515
- def _safe_path(self, path):
516
- enc_warn = "'%s' not %s encodable -- skipping"
517
-
518
- # To avoid accidental transcoding errors, first decode to unicode
519
- u_path = unicode_utils.filesys_decode(path)
520
- if u_path is None:
521
- log.warn("'%s' in unexpected encoding -- skipping" % path)
522
- return False
523
-
524
- # Must ensure utf-8 encodability
525
- utf8_path = unicode_utils.try_encode(u_path, "utf-8")
526
- if utf8_path is None:
527
- log.warn(enc_warn, path, 'utf-8')
528
- return False
529
-
530
- try:
531
- # ignore egg-info paths
532
- is_egg_info = ".egg-info" in u_path or b".egg-info" in utf8_path
533
- if self.ignore_egg_info_dir and is_egg_info:
534
- return False
535
- # accept if either way checks out
536
- if os.path.exists(u_path) or os.path.exists(utf8_path):
537
- return True
538
- # this will catch any encode errors decoding u_path
539
- except UnicodeEncodeError:
540
- log.warn(enc_warn, path, sys.getfilesystemencoding())
541
-
542
-
543
- class manifest_maker(sdist):
544
- template = "MANIFEST.in"
545
-
546
- def initialize_options(self):
547
- self.use_defaults = 1
548
- self.prune = 1
549
- self.manifest_only = 1
550
- self.force_manifest = 1
551
- self.ignore_egg_info_dir = False
552
-
553
- def finalize_options(self):
554
- pass
555
-
556
- def run(self):
557
- self.filelist = FileList(ignore_egg_info_dir=self.ignore_egg_info_dir)
558
- if not os.path.exists(self.manifest):
559
- self.write_manifest() # it must exist so it'll get in the list
560
- self.add_defaults()
561
- if os.path.exists(self.template):
562
- self.read_template()
563
- self.add_license_files()
564
- self.prune_file_list()
565
- self.filelist.sort()
566
- self.filelist.remove_duplicates()
567
- self.write_manifest()
568
-
569
- def _manifest_normalize(self, path):
570
- path = unicode_utils.filesys_decode(path)
571
- return path.replace(os.sep, '/')
572
-
573
- def write_manifest(self):
574
- """
575
- Write the file list in 'self.filelist' to the manifest file
576
- named by 'self.manifest'.
577
- """
578
- self.filelist._repair()
579
-
580
- # _repair() ensured encodability, but the paths may still need decoding to unicode
581
- files = [self._manifest_normalize(f) for f in self.filelist.files]
582
- msg = "writing manifest file '%s'" % self.manifest
583
- self.execute(write_file, (self.manifest, files), msg)
584
-
585
- def warn(self, msg):
586
- if not self._should_suppress_warning(msg):
587
- sdist.warn(self, msg)
588
-
589
- @staticmethod
590
- def _should_suppress_warning(msg):
591
- """
592
- suppress missing-file warnings from sdist
593
- """
594
- return re.match(r"standard file .*not found", msg)
595
-
596
- def add_defaults(self):
597
- sdist.add_defaults(self)
598
- self.filelist.append(self.template)
599
- self.filelist.append(self.manifest)
600
- rcfiles = list(walk_revctrl())
601
- if rcfiles:
602
- self.filelist.extend(rcfiles)
603
- elif os.path.exists(self.manifest):
604
- self.read_manifest()
605
-
606
- if os.path.exists("setup.py"):
607
- # setup.py should be included by default, even if it's not
608
- # the script called to create the sdist
609
- self.filelist.append("setup.py")
610
-
611
- ei_cmd = self.get_finalized_command('egg_info')
612
- self.filelist.graft(ei_cmd.egg_info)
613
-
614
- def add_license_files(self):
615
- license_files = self.distribution.metadata.license_files or []
616
- for lf in license_files:
617
- log.info("adding license file '%s'", lf)
619
- self.filelist.extend(license_files)
620
-
621
- def prune_file_list(self):
622
- build = self.get_finalized_command('build')
623
- base_dir = self.distribution.get_fullname()
624
- self.filelist.prune(build.build_base)
625
- self.filelist.prune(base_dir)
626
- sep = re.escape(os.sep)
627
- self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
628
- is_regex=1)
629
-
630
- def _safe_data_files(self, build_py):
631
- """
632
- The parent class implementation of this method
633
- (``sdist``) will try to include data files, which
634
- might cause recursion problems when
635
- ``include_package_data=True``.
636
-
637
- Therefore, avoid triggering any attempt of
638
- analyzing/building the manifest again.
639
- """
640
- if hasattr(build_py, 'get_data_files_without_manifest'):
641
- return build_py.get_data_files_without_manifest()
642
-
643
- warnings.warn(
644
- "Custom 'build_py' does not implement "
645
- "'get_data_files_without_manifest'.\nPlease extend command classes"
646
- " from setuptools instead of distutils.",
647
- SetuptoolsDeprecationWarning
648
- )
649
- return build_py.get_data_files()
650
-
651
-
652
- def write_file(filename, contents):
653
- """Create a file with the specified name and write 'contents' (a
654
- sequence of strings without line terminators) to it.
655
- """
656
- contents = "\n".join(contents)
657
-
658
- # assuming the contents has been vetted for utf-8 encoding
659
- contents = contents.encode("utf-8")
660
-
661
- with open(filename, "wb") as f: # always write POSIX-style manifest
662
- f.write(contents)
663
-
664
-
665
- def write_pkg_info(cmd, basename, filename):
666
- log.info("writing %s", filename)
667
- if not cmd.dry_run:
668
- metadata = cmd.distribution.metadata
669
- metadata.version, oldver = cmd.egg_version, metadata.version
670
- metadata.name, oldname = cmd.egg_name, metadata.name
671
-
672
- try:
673
- # write unescaped data to PKG-INFO, so older pkg_resources
674
- # can still parse it
675
- metadata.write_pkg_info(cmd.egg_info)
676
- finally:
677
- metadata.name, metadata.version = oldname, oldver
678
-
679
- safe = getattr(cmd.distribution, 'zip_safe', None)
680
-
681
- bdist_egg.write_safety_flag(cmd.egg_info, safe)
682
-
683
-
684
- def warn_depends_obsolete(cmd, basename, filename):
685
- if os.path.exists(filename):
686
- log.warn(
687
- "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
688
- "Use the install_requires/extras_require setup() args instead."
689
- )
690
-
691
-
692
- def _write_requirements(stream, reqs):
693
- lines = yield_lines(reqs or ())
694
-
695
- def append_cr(line):
696
- return line + '\n'
697
- lines = map(append_cr, lines)
698
- stream.writelines(lines)
699
-
700
-
701
- def write_requirements(cmd, basename, filename):
702
- dist = cmd.distribution
703
- data = io.StringIO()
704
- _write_requirements(data, dist.install_requires)
705
- extras_require = dist.extras_require or {}
706
- for extra in sorted(extras_require):
707
- data.write('\n[{extra}]\n'.format(**vars()))
708
- _write_requirements(data, extras_require[extra])
709
- cmd.write_or_delete_file("requirements", filename, data.getvalue())
710
-
711
-
712
- def write_setup_requirements(cmd, basename, filename):
713
- data = io.StringIO()
714
- _write_requirements(data, cmd.distribution.setup_requires)
715
- cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
716
-
717
-
718
- def write_toplevel_names(cmd, basename, filename):
719
- pkgs = dict.fromkeys(
720
- [
721
- k.split('.', 1)[0]
722
- for k in cmd.distribution.iter_distribution_names()
723
- ]
724
- )
725
- cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
726
-
727
-
728
- def overwrite_arg(cmd, basename, filename):
729
- write_arg(cmd, basename, filename, True)
730
-
731
-
732
- def write_arg(cmd, basename, filename, force=False):
733
- argname = os.path.splitext(basename)[0]
734
- value = getattr(cmd.distribution, argname, None)
735
- if value is not None:
736
- value = '\n'.join(value) + '\n'
737
- cmd.write_or_delete_file(argname, filename, value, force)
738
-
739
-
740
- def write_entries(cmd, basename, filename):
741
- eps = _entry_points.load(cmd.distribution.entry_points)
742
- defn = _entry_points.render(eps)
743
- cmd.write_or_delete_file('entry points', filename, defn, True)
744
-
745
-
746
- def get_pkg_info_revision():
747
- """
748
- Get a -r### off of PKG-INFO Version in case this is an sdist of
749
- a subversion revision.
750
- """
751
- warnings.warn(
752
- "get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning)
753
- if os.path.exists('PKG-INFO'):
754
- with io.open('PKG-INFO') as f:
755
- for line in f:
756
- match = re.match(r"Version:.*-r(\d+)\s*$", line)
757
- if match:
758
- return int(match.group(1))
759
- return 0
760
-
761
-
762
- class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning):
763
- """Deprecated behavior warning for EggInfo, bypassing suppression."""
spaces/CVH-vn1210/make_hair/minigpt4/models/blip2.py DELETED
@@ -1,221 +0,0 @@
1
- """
2
- Copyright (c) 2023, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
- import contextlib
8
- import logging
9
- import os
10
- import time
11
- import datetime
12
-
13
- import torch
14
- import torch.nn as nn
15
- import torch.distributed as dist
16
- import torch.nn.functional as F
17
-
18
- import minigpt4.common.dist_utils as dist_utils
19
- from minigpt4.common.dist_utils import download_cached_file
20
- from minigpt4.common.utils import is_url
21
- from minigpt4.common.logger import MetricLogger
22
- from minigpt4.models.base_model import BaseModel
23
- from minigpt4.models.Qformer import BertConfig, BertLMHeadModel
24
- from minigpt4.models.eva_vit import create_eva_vit_g
25
- from transformers import BertTokenizer
26
-
27
-
28
- class Blip2Base(BaseModel):
29
- @classmethod
30
- def init_tokenizer(cls):
31
- tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
32
- tokenizer.add_special_tokens({"bos_token": "[DEC]"})
33
- return tokenizer
34
-
35
- def maybe_autocast(self, dtype=torch.float16):
36
- # if on cpu, don't use autocast
37
- # if on gpu, use autocast with dtype if provided, otherwise use torch.float16
38
- enable_autocast = self.device != torch.device("cpu")
39
-
40
- if enable_autocast:
41
- return torch.cuda.amp.autocast(dtype=dtype)
42
- else:
43
- return contextlib.nullcontext()
44
-
45
- @classmethod
46
- def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2):
47
- encoder_config = BertConfig.from_pretrained("bert-base-uncased")
48
- encoder_config.encoder_width = vision_width
49
- # insert cross-attention layer every other block
50
- encoder_config.add_cross_attention = True
51
- encoder_config.cross_attention_freq = cross_attention_freq
52
- encoder_config.query_length = num_query_token
53
- Qformer = BertLMHeadModel(config=encoder_config)
54
- query_tokens = nn.Parameter(
55
- torch.zeros(1, num_query_token, encoder_config.hidden_size)
56
- )
57
- query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
58
- return Qformer, query_tokens
59
-
60
- @classmethod
61
- def init_vision_encoder(
62
- cls, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision
63
- ):
64
- assert model_name == "eva_clip_g", "vit model must be eva_clip_g for current version of MiniGPT-4"
65
- visual_encoder = create_eva_vit_g(
66
- img_size, drop_path_rate, use_grad_checkpoint, precision
67
- )
68
-
69
- ln_vision = LayerNorm(visual_encoder.num_features)
70
- return visual_encoder, ln_vision
71
-
72
- def load_from_pretrained(self, url_or_filename):
73
- if is_url(url_or_filename):
74
- cached_file = download_cached_file(
75
- url_or_filename, check_hash=False, progress=True
76
- )
77
- checkpoint = torch.load(cached_file, map_location="cpu")
78
- elif os.path.isfile(url_or_filename):
79
- checkpoint = torch.load(url_or_filename, map_location="cpu")
80
- else:
81
- raise RuntimeError("checkpoint url or path is invalid")
82
-
83
- state_dict = checkpoint["model"]
84
-
85
- msg = self.load_state_dict(state_dict, strict=False)
86
-
87
- # logging.info("Missing keys {}".format(msg.missing_keys))
88
- logging.info("load checkpoint from %s" % url_or_filename)
89
-
90
- return msg
91
-
92
-
93
- def disabled_train(self, mode=True):
94
- """Overwrite model.train with this function to make sure train/eval mode
95
- does not change anymore."""
96
- return self
97
-
98
-
99
- class LayerNorm(nn.LayerNorm):
100
- """Subclass torch's LayerNorm to handle fp16."""
101
-
102
- def forward(self, x: torch.Tensor):
103
- orig_type = x.dtype
104
- ret = super().forward(x.type(torch.float32))
105
- return ret.type(orig_type)
106
-
107
-
108
- def compute_sim_matrix(model, data_loader, **kwargs):
109
- k_test = kwargs.pop("k_test")
110
-
111
- metric_logger = MetricLogger(delimiter=" ")
112
- header = "Evaluation:"
113
-
114
- logging.info("Computing features for evaluation...")
115
- start_time = time.time()
116
-
117
- texts = data_loader.dataset.text
118
- num_text = len(texts)
119
- text_bs = 256
120
- text_ids = []
121
- text_embeds = []
122
- text_atts = []
123
- for i in range(0, num_text, text_bs):
124
- text = texts[i : min(num_text, i + text_bs)]
125
- text_input = model.tokenizer(
126
- text,
127
- padding="max_length",
128
- truncation=True,
129
- max_length=35,
130
- return_tensors="pt",
131
- ).to(model.device)
132
- text_feat = model.forward_text(text_input)
133
- text_embed = F.normalize(model.text_proj(text_feat))
134
- text_embeds.append(text_embed)
135
- text_ids.append(text_input.input_ids)
136
- text_atts.append(text_input.attention_mask)
137
-
138
- text_embeds = torch.cat(text_embeds, dim=0)
139
- text_ids = torch.cat(text_ids, dim=0)
140
- text_atts = torch.cat(text_atts, dim=0)
141
-
142
- vit_feats = []
143
- image_embeds = []
144
- for samples in data_loader:
145
- image = samples["image"]
146
-
147
- image = image.to(model.device)
148
- image_feat, vit_feat = model.forward_image(image)
149
- image_embed = model.vision_proj(image_feat)
150
- image_embed = F.normalize(image_embed, dim=-1)
151
-
152
- vit_feats.append(vit_feat.cpu())
153
- image_embeds.append(image_embed)
154
-
155
- vit_feats = torch.cat(vit_feats, dim=0)
156
- image_embeds = torch.cat(image_embeds, dim=0)
157
-
158
- sims_matrix = []
159
- for image_embed in image_embeds:
160
- sim_q2t = image_embed @ text_embeds.t()
161
- sim_i2t, _ = sim_q2t.max(0)
162
- sims_matrix.append(sim_i2t)
163
- sims_matrix = torch.stack(sims_matrix, dim=0)
164
-
165
- score_matrix_i2t = torch.full(
166
- (len(data_loader.dataset.image), len(texts)), -100.0
167
- ).to(model.device)
168
-
169
- num_tasks = dist_utils.get_world_size()
170
- rank = dist_utils.get_rank()
171
- step = sims_matrix.size(0) // num_tasks + 1
172
- start = rank * step
173
- end = min(sims_matrix.size(0), start + step)
174
-
175
- for i, sims in enumerate(
176
- metric_logger.log_every(sims_matrix[start:end], 50, header)
177
- ):
178
- topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
179
- image_inputs = vit_feats[start + i].repeat(k_test, 1, 1).to(model.device)
180
- score = model.compute_itm(
181
- image_inputs=image_inputs,
182
- text_ids=text_ids[topk_idx],
183
- text_atts=text_atts[topk_idx],
184
- ).float()
185
- score_matrix_i2t[start + i, topk_idx] = score + topk_sim
186
-
187
- sims_matrix = sims_matrix.t()
188
- score_matrix_t2i = torch.full(
189
- (len(texts), len(data_loader.dataset.image)), -100.0
190
- ).to(model.device)
191
-
192
- step = sims_matrix.size(0) // num_tasks + 1
193
- start = rank * step
194
- end = min(sims_matrix.size(0), start + step)
195
-
196
- for i, sims in enumerate(
197
- metric_logger.log_every(sims_matrix[start:end], 50, header)
198
- ):
199
- topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
200
- image_inputs = vit_feats[topk_idx.cpu()].to(model.device)
201
- score = model.compute_itm(
202
- image_inputs=image_inputs,
203
- text_ids=text_ids[start + i].repeat(k_test, 1),
204
- text_atts=text_atts[start + i].repeat(k_test, 1),
205
- ).float()
206
- score_matrix_t2i[start + i, topk_idx] = score + topk_sim
207
-
208
- if dist_utils.is_dist_avail_and_initialized():
209
- dist.barrier()
210
- torch.distributed.all_reduce(
211
- score_matrix_i2t, op=torch.distributed.ReduceOp.SUM
212
- )
213
- torch.distributed.all_reduce(
214
- score_matrix_t2i, op=torch.distributed.ReduceOp.SUM
215
- )
216
-
217
- total_time = time.time() - start_time
218
- total_time_str = str(datetime.timedelta(seconds=int(total_time)))
219
- logging.info("Evaluation time {}".format(total_time_str))
220
-
221
- return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()
spaces/CVH-vn1210/make_hair/minigpt4/models/blip2_outputs.py DELETED
@@ -1,110 +0,0 @@
1
- """
2
- Copyright (c) 2022, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
-
8
- from dataclasses import dataclass
9
- from typing import Optional
10
-
11
- import torch
12
- from transformers.modeling_outputs import (
13
- ModelOutput,
14
- BaseModelOutputWithPoolingAndCrossAttentions,
15
- CausalLMOutputWithCrossAttentions,
16
- )
17
-
18
-
19
- @dataclass
20
- class BlipSimilarity(ModelOutput):
21
- sim_i2t: torch.FloatTensor = None
22
- sim_t2i: torch.FloatTensor = None
23
-
24
- sim_i2t_m: Optional[torch.FloatTensor] = None
25
- sim_t2i_m: Optional[torch.FloatTensor] = None
26
-
27
- sim_i2t_targets: Optional[torch.FloatTensor] = None
28
- sim_t2i_targets: Optional[torch.FloatTensor] = None
29
-
30
-
31
- @dataclass
32
- class BlipIntermediateOutput(ModelOutput):
33
- """
34
- Data class for intermediate outputs of BLIP models.
35
-
36
- image_embeds (torch.FloatTensor): Image embeddings, shape (batch_size, num_patches, embed_dim).
37
- text_embeds (torch.FloatTensor): Text embeddings, shape (batch_size, seq_len, embed_dim).
38
-
39
- image_embeds_m (torch.FloatTensor): Image embeddings from momentum visual encoder, shape (batch_size, num_patches, embed_dim).
40
- text_embeds_m (torch.FloatTensor): Text embeddings from momentum text encoder, shape (batch_size, seq_len, embed_dim).
41
-
42
- encoder_output (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder.
43
- encoder_output_neg (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder for negative pairs.
44
-
45
- decoder_output (CausalLMOutputWithCrossAttentions): output from the image-grounded text decoder.
46
- decoder_labels (torch.LongTensor): labels for the captioning loss.
47
-
48
- itm_logits (torch.FloatTensor): logits for the image-text matching loss, shape (batch_size * 3, 2).
49
- itm_labels (torch.LongTensor): labels for the image-text matching loss, shape (batch_size * 3,)
50
-
51
- """
52
-
53
- # uni-modal features
54
- image_embeds: torch.FloatTensor = None
55
- text_embeds: Optional[torch.FloatTensor] = None
56
-
57
- image_embeds_m: Optional[torch.FloatTensor] = None
58
- text_embeds_m: Optional[torch.FloatTensor] = None
59
-
60
- # intermediate outputs of multimodal encoder
61
- encoder_output: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
62
- encoder_output_neg: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
63
-
64
- itm_logits: Optional[torch.FloatTensor] = None
65
- itm_labels: Optional[torch.LongTensor] = None
66
-
67
- # intermediate outputs of multimodal decoder
68
- decoder_output: Optional[CausalLMOutputWithCrossAttentions] = None
69
- decoder_labels: Optional[torch.LongTensor] = None
70
-
71
-
72
- @dataclass
73
- class BlipOutput(ModelOutput):
74
- # some finetuned models (e.g. BlipVQA) do not compute similarity, thus optional.
75
- sims: Optional[BlipSimilarity] = None
76
-
77
- intermediate_output: BlipIntermediateOutput = None
78
-
79
- loss: Optional[torch.FloatTensor] = None
80
-
81
- loss_itc: Optional[torch.FloatTensor] = None
82
-
83
- loss_itm: Optional[torch.FloatTensor] = None
84
-
85
- loss_lm: Optional[torch.FloatTensor] = None
86
-
87
-
88
- @dataclass
89
- class BlipOutputFeatures(ModelOutput):
90
- """
91
- Data class of features from BlipFeatureExtractor.
92
-
93
- Args:
94
- image_embeds: (torch.FloatTensor) of shape (batch_size, num_patches+1, embed_dim), optional
95
- image_features: (torch.FloatTensor) of shape (batch_size, num_patches+1, feature_dim), optional
96
- text_embeds: (torch.FloatTensor) of shape (batch_size, sequence_length+1, embed_dim), optional
97
- text_features: (torch.FloatTensor) of shape (batch_size, sequence_length+1, feature_dim), optional
98
-
99
- The first embedding or feature is for the [CLS] token.
100
-
101
- Features are obtained by projecting the corresponding embedding into a normalized low-dimensional space.
102
- """
103
-
104
- image_embeds: Optional[torch.FloatTensor] = None
105
- image_embeds_proj: Optional[torch.FloatTensor] = None
106
-
107
- text_embeds: Optional[torch.FloatTensor] = None
108
- text_embeds_proj: Optional[torch.FloatTensor] = None
109
-
110
- multimodal_embeds: Optional[torch.FloatTensor] = None
spaces/CVPR/LIVE/thrust/thrust/functional.h DELETED
@@ -1,1719 +0,0 @@
1
- /*
2
- * Copyright 2008-2018 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file functional.h
19
- * \brief Function objects and tools for manipulating them
20
- */
21
-
22
- #pragma once
23
-
24
- #include <thrust/detail/config.h>
25
- #include <functional>
26
- #include <thrust/detail/functional/placeholder.h>
27
-
28
- namespace thrust
29
- {
30
-
31
- /*! \addtogroup function_objects Function Objects
32
- */
33
-
34
- template<typename Operation> struct unary_traits;
35
-
36
- template<typename Operation> struct binary_traits;
37
-
38
- /*! \addtogroup function_object_adaptors Function Object Adaptors
39
- * \ingroup function_objects
40
- * \{
41
- */
42
-
43
- /*! \p unary_function is an empty base class: it contains no member functions
44
- * or member variables, but only type information. The only reason it exists
45
- * is to make it more convenient to define types that are models of the
46
- * concept Adaptable Unary Function. Specifically, any model of Adaptable
47
- * Unary Function must define nested \c typedefs. Those \c typedefs are
48
- * provided by the base class \p unary_function.
49
- *
50
- * The following code snippet demonstrates how to construct an
51
- * Adaptable Unary Function using \p unary_function.
52
- *
53
- * \code
54
- * struct sine : public thrust::unary_function<float,float>
55
- * {
56
- * __host__ __device__
57
- * float operator()(float x) { return sinf(x); }
58
- * };
59
- * \endcode
60
- *
61
- * \note Because C++11 language support makes the functionality of
62
- * \c unary_function obsolete, its use is optional if C++11 language
63
- * features are enabled.
64
- *
65
- * \see http://www.sgi.com/tech/stl/unary_function.html
66
- * \see binary_function
67
- */
68
- template<typename Argument,
69
- typename Result>
70
- struct unary_function
71
- {
72
- /*! \typedef argument_type
73
- * \brief The type of the function object's argument.
74
- */
75
- typedef Argument argument_type;
76
-
77
- /*! \typedef result_type;
78
- * \brief The type of the function object's result.
79
- */
80
- typedef Result result_type;
81
- }; // end unary_function
82
-
83
- /*! \p binary_function is an empty base class: it contains no member functions
84
- * or member variables, but only type information. The only reason it exists
85
- * is to make it more convenient to define types that are models of the
86
- * concept Adaptable Binary Function. Specifically, any model of Adaptable
87
- * Binary Function must define nested \c typedefs. Those \c typedefs are
88
- * provided by the base class \p binary_function.
89
- *
90
- * The following code snippet demonstrates how to construct an
91
- * Adaptable Binary Function using \p binary_function.
92
- *
93
- * \code
94
- * struct exponentiate : public thrust::binary_function<float,float,float>
95
- * {
96
- * __host__ __device__
97
- * float operator()(float x, float y) { return powf(x,y); }
98
- * };
99
- * \endcode
100
- *
101
- * \note Because C++11 language support makes the functionality of
102
- * \c binary_function obsolete, its use is optional if C++11 language
103
- * features are enabled.
104
- *
105
- * \see http://www.sgi.com/tech/stl/binary_function.html
106
- * \see unary_function
107
- */
108
- template<typename Argument1,
109
- typename Argument2,
110
- typename Result>
111
- struct binary_function
112
- {
113
- /*! \typedef first_argument_type
114
- * \brief The type of the function object's first argument.
115
- */
116
- typedef Argument1 first_argument_type;
117
-
118
- /*! \typedef second_argument_type
119
- * \brief The type of the function object's second argument.
120
- */
121
- typedef Argument2 second_argument_type;
122
-
123
- /*! \typedef result_type
124
- * \brief The type of the function object's result;
125
- */
126
- typedef Result result_type;
127
- }; // end binary_function
128
-
129
- /*! \}
130
- */
131
-
132
-
133
- /*! \addtogroup predefined_function_objects Predefined Function Objects
134
- * \ingroup function_objects
135
- */
136
-
137
- /*! \addtogroup arithmetic_operations Arithmetic Operations
138
- * \ingroup predefined_function_objects
139
- * \{
140
- */
141
-
142
- #define THRUST_UNARY_FUNCTOR_VOID_SPECIALIZATION(func, impl) \
143
- template <> \
144
- struct func<void> \
145
- { \
146
- using is_transparent = void; \
147
- __thrust_exec_check_disable__ \
148
- template <typename T> \
149
- __host__ __device__ \
150
- constexpr auto operator()(T&& x) const \
151
- noexcept(noexcept(impl)) -> decltype(impl) \
152
- { \
153
- return impl; \
154
- } \
155
- }
156
-
157
- #define THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION(func, impl) \
158
- template <> \
159
- struct func<void> \
160
- { \
161
- using is_transparent = void; \
162
- __thrust_exec_check_disable__ \
163
- template <typename T1, typename T2> \
164
- __host__ __device__ \
165
- constexpr auto operator()(T1&& t1, T2&& t2) const \
166
- noexcept(noexcept(impl)) -> decltype(impl) \
167
- { \
168
- return impl; \
169
- } \
170
- }
171
-
172
- #define THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(func, op) \
173
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION( \
174
- func, THRUST_FWD(t1) op THRUST_FWD(t2))
175
-
176
-
177
- /*! \p plus is a function object. Specifically, it is an Adaptable Binary Function.
178
- * If \c f is an object of class <tt>plus<T></tt>, and \c x and \c y are objects
179
- * of class \c T, then <tt>f(x,y)</tt> returns <tt>x+y</tt>.
180
- *
181
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
182
- * and if \c x and \c y are objects of type \p T, then <tt>x+y</tt> must be defined and must have a return type that is convertible to \c T.
183
- *
184
- * The following code snippet demonstrates how to use <tt>plus</tt> to sum two
185
- * device_vectors of \c floats.
186
- *
187
- * \code
188
- * #include <thrust/device_vector.h>
189
- * #include <thrust/functional.h>
190
- * #include <thrust/sequence.h>
191
- * #include <thrust/fill.h>
192
- * #include <thrust/transform.h>
193
- * ...
194
- * const int N = 1000;
195
- * thrust::device_vector<float> V1(N);
196
- * thrust::device_vector<float> V2(N);
197
- * thrust::device_vector<float> V3(N);
198
- *
199
- * thrust::sequence(V1.begin(), V1.end(), 1);
200
- * thrust::fill(V2.begin(), V2.end(), 75);
201
- *
202
- * thrust::transform(V1.begin(), V1.end(), V2.begin(), V3.begin(),
203
- * thrust::plus<float>());
204
- * // V3 is now {76, 77, 78, ..., 1075}
205
- * \endcode
206
- *
207
- * \see http://www.sgi.com/tech/stl/plus.html
208
- * \see binary_function
209
- */
210
- template<typename T = void>
211
- struct plus
212
- {
213
- /*! \typedef first_argument_type
214
- * \brief The type of the function object's first argument.
215
- */
216
- typedef T first_argument_type;
217
-
218
- /*! \typedef second_argument_type
219
- * \brief The type of the function object's second argument.
220
- */
221
- typedef T second_argument_type;
222
-
223
- /*! \typedef result_type
224
- * \brief The type of the function object's result;
225
- */
226
- typedef T result_type;
227
-
228
- /*! Function call operator. The return value is <tt>lhs + rhs</tt>.
229
- */
230
- __thrust_exec_check_disable__
231
- __host__ __device__
232
- constexpr T operator()(const T &lhs, const T &rhs) const
233
- {
234
- return lhs + rhs;
235
- }
236
- }; // end plus
237
-
238
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(plus, +);
239
-
240
- /*! \p minus is a function object. Specifically, it is an Adaptable Binary Function.
241
- * If \c f is an object of class <tt>minus<T></tt>, and \c x and \c y are objects
242
- * of class \c T, then <tt>f(x,y)</tt> returns <tt>x-y</tt>.
243
- *
244
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
245
- * and if \c x and \c y are objects of type \p T, then <tt>x-y</tt> must be defined and must have a return type that is convertible to \c T.
246
- *
247
- * The following code snippet demonstrates how to use <tt>minus</tt> to subtract
248
- * a device_vector of \c floats from another.
249
- *
250
- * \code
251
- * #include <thrust/device_vector.h>
252
- * #include <thrust/functional.h>
253
- * #include <thrust/sequence.h>
254
- * #include <thrust/fill.h>
255
- * #include <thrust/transform.h>
256
- * ...
257
- * const int N = 1000;
258
- * thrust::device_vector<float> V1(N);
259
- * thrust::device_vector<float> V2(N);
260
- * thrust::device_vector<float> V3(N);
261
- *
262
- * thrust::sequence(V1.begin(), V1.end(), 1);
263
- * thrust::fill(V2.begin(), V2.end(), 75);
264
- *
265
- * thrust::transform(V1.begin(), V1.end(), V2.begin(), V3.begin(),
266
- * thrust::minus<float>());
267
- * // V3 is now {-74, -73, -72, ..., 925}
268
- * \endcode
269
- *
270
- * \see http://www.sgi.com/tech/stl/minus.html
271
- * \see binary_function
272
- */
273
- template<typename T = void>
274
- struct minus
275
- {
276
- /*! \typedef first_argument_type
277
- * \brief The type of the function object's first argument.
278
- */
279
- typedef T first_argument_type;
280
-
281
- /*! \typedef second_argument_type
282
- * \brief The type of the function object's second argument.
283
- */
284
- typedef T second_argument_type;
285
-
286
- /*! \typedef result_type
287
- * \brief The type of the function object's result;
288
- */
289
- typedef T result_type;
290
-
291
- /*! Function call operator. The return value is <tt>lhs - rhs</tt>.
292
- */
293
- __thrust_exec_check_disable__
294
- __host__ __device__
295
- constexpr T operator()(const T &lhs, const T &rhs) const
296
- {
297
- return lhs - rhs;
298
- }
299
- }; // end minus
300
-
301
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(minus, -);
302
-
303
- /*! \p multiplies is a function object. Specifically, it is an Adaptable Binary Function.
304
- * If \c f is an object of class <tt>multiplies<T></tt>, and \c x and \c y are objects
305
- * of class \c T, then <tt>f(x,y)</tt> returns <tt>x*y</tt>.
306
- *
307
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
308
- * and if \c x and \c y are objects of type \p T, then <tt>x*y</tt> must be defined and must have a return type that is convertible to \c T.
309
- *
310
- * The following code snippet demonstrates how to use <tt>multiplies</tt> to multiply
311
- * two device_vectors of \c floats.
312
- *
313
- * \code
314
- * #include <thrust/device_vector.h>
315
- * #include <thrust/functional.h>
316
- * #include <thrust/sequence.h>
317
- * #include <thrust/fill.h>
318
- * #include <thrust/transform.h>
319
- * ...
320
- * const int N = 1000;
321
- * thrust::device_vector<float> V1(N);
322
- * thrust::device_vector<float> V2(N);
323
- * thrust::device_vector<float> V3(N);
324
- *
325
- * thrust::sequence(V1.begin(), V1.end(), 1);
326
- * thrust::fill(V2.begin(), V2.end(), 75);
327
- *
328
- * thrust::transform(V1.begin(), V1.end(), V2.begin(), V3.begin(),
329
- * thrust::multiplies<float>());
330
- * // V3 is now {75, 150, 225, ..., 75000}
331
- * \endcode
332
- *
333
- * \see http://www.sgi.com/tech/stl/multiplies.html
334
- * \see binary_function
335
- */
336
- template<typename T = void>
337
- struct multiplies
338
- {
339
- /*! \typedef first_argument_type
340
- * \brief The type of the function object's first argument.
341
- */
342
- typedef T first_argument_type;
343
-
344
- /*! \typedef second_argument_type
345
- * \brief The type of the function object's second argument.
346
- */
347
- typedef T second_argument_type;
348
-
349
- /*! \typedef result_type
350
- * \brief The type of the function object's result;
351
- */
352
- typedef T result_type;
353
-
354
- /*! Function call operator. The return value is <tt>lhs * rhs</tt>.
355
- */
356
- __thrust_exec_check_disable__
357
- __host__ __device__
358
- constexpr T operator()(const T &lhs, const T &rhs) const
359
- {
360
- return lhs * rhs;
361
- }
362
- }; // end multiplies
363
-
364
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(multiplies, *);
365
-
366
- /*! \p divides is a function object. Specifically, it is an Adaptable Binary Function.
367
- * If \c f is an object of class <tt>divides<T></tt>, and \c x and \c y are objects
368
- * of class \c T, then <tt>f(x,y)</tt> returns <tt>x/y</tt>.
369
- *
370
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
371
- * and if \c x and \c y are objects of type \p T, then <tt>x/y</tt> must be defined and must have a return type that is convertible to \c T.
372
- *
373
- * The following code snippet demonstrates how to use <tt>divides</tt> to divide
374
- * one device_vectors of \c floats by another.
375
- *
376
- * \code
377
- * #include <thrust/device_vector.h>
378
- * #include <thrust/functional.h>
379
- * #include <thrust/sequence.h>
380
- * #include <thrust/fill.h>
381
- * #include <thrust/transform.h>
382
- * ...
383
- * const int N = 1000;
384
- * thrust::device_vector<float> V1(N);
385
- * thrust::device_vector<float> V2(N);
386
- * thrust::device_vector<float> V3(N);
387
- *
388
- * thrust::sequence(V1.begin(), V1.end(), 1);
389
- * thrust::fill(V2.begin(), V2.end(), 75);
390
- *
391
- * thrust::transform(V1.begin(), V1.end(), V2.begin(), V3.begin(),
392
- * thrust::divides<float>());
393
- * // V3 is now {1/75, 2/75, 3/75, ..., 1000/75}
394
- * \endcode
395
- *
396
- * \see http://www.sgi.com/tech/stl/divides.html
397
- * \see binary_function
398
- */
399
- template<typename T = void>
400
- struct divides
401
- {
402
- /*! \typedef first_argument_type
403
- * \brief The type of the function object's first argument.
404
- */
405
- typedef T first_argument_type;
406
-
407
- /*! \typedef second_argument_type
408
- * \brief The type of the function object's second argument.
409
- */
410
- typedef T second_argument_type;
411
-
412
- /*! \typedef result_type
413
- * \brief The type of the function object's result;
414
- */
415
- typedef T result_type;
416
-
417
- /*! Function call operator. The return value is <tt>lhs / rhs</tt>.
418
- */
419
- __thrust_exec_check_disable__
420
- __host__ __device__
421
- constexpr T operator()(const T &lhs, const T &rhs) const
422
- {
423
- return lhs / rhs;
424
- }
425
- }; // end divides
426
-
427
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(divides, /);
428
-
429
- /*! \p modulus is a function object. Specifically, it is an Adaptable Binary Function.
430
- * If \c f is an object of class <tt>modulus<T></tt>, and \c x and \c y are objects
431
- * of class \c T, then <tt>f(x,y)</tt> returns <tt>x \% y</tt>.
432
- *
433
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
434
- * and if \c x and \c y are objects of type \p T, then <tt>x \% y</tt> must be defined and must have a return type that is convertible to \c T.
435
- *
436
- * The following code snippet demonstrates how to use <tt>modulus</tt> to take
437
- * the modulus of one device_vectors of \c floats by another.
438
- *
439
- * \code
440
- * #include <thrust/device_vector.h>
441
- * #include <thrust/functional.h>
442
- * #include <thrust/sequence.h>
443
- * #include <thrust/fill.h>
444
- * #include <thrust/transform.h>
445
- * ...
446
- * const int N = 1000;
447
- * thrust::device_vector<float> V1(N);
448
- * thrust::device_vector<float> V2(N);
449
- * thrust::device_vector<float> V3(N);
450
- *
451
- * thrust::sequence(V1.begin(), V1.end(), 1);
452
- * thrust::fill(V2.begin(), V2.end(), 75);
453
- *
454
- * thrust::transform(V1.begin(), V1.end(), V2.begin(), V3.begin(),
455
- * thrust::modulus<int>());
456
- * // V3 is now {1%75, 2%75, 3%75, ..., 1000%75}
457
- * \endcode
458
- *
459
- * \see http://www.sgi.com/tech/stl/modulus.html
460
- * \see binary_function
461
- */
462
- template<typename T = void>
463
- struct modulus
464
- {
465
- /*! \typedef first_argument_type
466
- * \brief The type of the function object's first argument.
467
- */
468
- typedef T first_argument_type;
469
-
470
- /*! \typedef second_argument_type
471
- * \brief The type of the function object's second argument.
472
- */
473
- typedef T second_argument_type;
474
-
475
- /*! \typedef result_type
476
- * \brief The type of the function object's result;
477
- */
478
- typedef T result_type;
479
-
480
- /*! Function call operator. The return value is <tt>lhs % rhs</tt>.
481
- */
482
- __thrust_exec_check_disable__
483
- __host__ __device__
484
- constexpr T operator()(const T &lhs, const T &rhs) const
485
- {
486
- return lhs % rhs;
487
- }
488
- }; // end modulus
489
-
490
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(modulus, %);
491
-
492
- /*! \p negate is a function object. Specifically, it is an Adaptable Unary Function.
493
- * If \c f is an object of class <tt>negate<T></tt>, and \c x is an object
494
- * of class \c T, then <tt>f(x)</tt> returns <tt>-x</tt>.
495
- *
496
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
497
- * and if \c x is an object of type \p T, then <tt>-x</tt> must be defined and must have a return type that is convertible to \c T.
498
- *
499
- * The following code snippet demonstrates how to use <tt>negate</tt> to negate
500
- * the elements of a device_vector of \c floats.
501
- *
502
- * \code
503
- * #include <thrust/device_vector.h>
504
- * #include <thrust/functional.h>
505
- * #include <thrust/sequence.h>
506
- * #include <thrust/transform.h>
507
- * ...
508
- * const int N = 1000;
509
- * thrust::device_vector<float> V1(N);
510
- * thrust::device_vector<float> V2(N);
511
- *
512
- * thrust::sequence(V1.begin(), V1.end(), 1);
513
- *
514
- * thrust::transform(V1.begin(), V1.end(), V2.begin(),
515
- * thrust::negate<float>());
516
- * // V2 is now {-1, -2, -3, ..., -1000}
517
- * \endcode
518
- *
519
- * \see http://www.sgi.com/tech/stl/negate.html
520
- * \see unary_function
521
- */
522
- template<typename T = void>
523
- struct negate
524
- {
525
- /*! \typedef argument_type
526
- * \brief The type of the function object's argument.
527
- */
528
- typedef T argument_type;
529
-
530
- /*! \typedef result_type
531
- * \brief The type of the function object's result;
532
- */
533
- typedef T result_type;
534
-
535
- /*! Function call operator. The return value is <tt>-x</tt>.
536
- */
537
- __thrust_exec_check_disable__
538
- __host__ __device__
539
- constexpr T operator()(const T &x) const
540
- {
541
- return -x;
542
- }
543
- }; // end negate
544
-
545
- THRUST_UNARY_FUNCTOR_VOID_SPECIALIZATION(negate, -THRUST_FWD(x));
546
-
547
- /*! \p square is a function object. Specifically, it is an Adaptable Unary Function.
548
- * If \c f is an object of class <tt>square<T></tt>, and \c x is an object
549
- * of class \c T, then <tt>f(x)</tt> returns <tt>x*x</tt>.
550
- *
551
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
552
- * and if \c x is an object of type \p T, then <tt>x*x</tt> must be defined and must have a return type that is convertible to \c T.
553
- *
554
- * The following code snippet demonstrates how to use <tt>square</tt> to square
555
- * the elements of a device_vector of \c floats.
556
- *
557
- * \code
558
- * #include <thrust/device_vector.h>
559
- * #include <thrust/functional.h>
560
- * #include <thrust/sequence.h>
561
- * #include <thrust/transform.h>
562
- * ...
563
- * const int N = 1000;
564
- * thrust::device_vector<float> V1(N);
565
- * thrust::device_vector<float> V2(N);
566
- *
567
- * thrust::sequence(V1.begin(), V1.end(), 1);
568
- *
569
- * thrust::transform(V1.begin(), V1.end(), V2.begin(),
570
- * thrust::square<float>());
571
- * // V2 is now {1, 4, 9, ..., 1000000}
572
- * \endcode
573
- *
574
- * \see unary_function
575
- */
576
- template<typename T = void>
577
- struct square
578
- {
579
- /*! \typedef argument_type
580
- * \brief The type of the function object's argument.
581
- */
582
- typedef T argument_type;
583
-
584
- /*! \typedef result_type
585
- * \brief The type of the function object's result;
586
- */
587
- typedef T result_type;
588
-
589
- /*! Function call operator. The return value is <tt>x*x</tt>.
590
- */
591
- __thrust_exec_check_disable__
592
- __host__ __device__
593
- constexpr T operator()(const T &x) const
594
- {
595
- return x*x;
596
- }
597
- }; // end square
598
-
599
- THRUST_UNARY_FUNCTOR_VOID_SPECIALIZATION(square, x*x);
600
-
601
- /*! \}
602
- */
603
-
604
- /*! \addtogroup comparison_operations Comparison Operations
605
- * \ingroup predefined_function_objects
606
- * \{
607
- */
608
-
609
- /*! \p equal_to is a function object. Specifically, it is an Adaptable Binary
610
- * Predicate, which means it is a function object that tests the truth or falsehood
611
- * of some condition. If \c f is an object of class <tt>equal_to<T></tt> and \c x
612
- * and \c y are objects of class \c T, then <tt>f(x,y)</tt> returns \c true if
613
- * <tt>x == y</tt> and \c false otherwise.
614
- *
615
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
616
- *
617
- * \see http://www.sgi.com/tech/stl/equal_to.html
618
- * \see binary_function
619
- */
620
- template<typename T = void>
621
- struct equal_to
622
- {
623
- /*! \typedef first_argument_type
624
- * \brief The type of the function object's first argument.
625
- */
626
- typedef T first_argument_type;
627
-
628
- /*! \typedef second_argument_type
629
- * \brief The type of the function object's second argument.
630
- */
631
- typedef T second_argument_type;
632
-
633
- /*! \typedef result_type
634
- * \brief The type of the function object's result;
635
- */
636
- typedef bool result_type;
637
-
638
- /*! Function call operator. The return value is <tt>lhs == rhs</tt>.
639
- */
640
- __thrust_exec_check_disable__
641
- __host__ __device__
642
- constexpr bool operator()(const T &lhs, const T &rhs) const
643
- {
644
- return lhs == rhs;
645
- }
646
- }; // end equal_to
647
-
648
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(equal_to, ==);
649
-
650
- /*! \p not_equal_to is a function object. Specifically, it is an Adaptable Binary
651
- * Predicate, which means it is a function object that tests the truth or falsehood
652
- * of some condition. If \c f is an object of class <tt>not_equal_to<T></tt> and \c x
653
- * and \c y are objects of class \c T, then <tt>f(x,y)</tt> returns \c true if
654
- * <tt>x != y</tt> and \c false otherwise.
655
- *
656
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
657
- *
658
- * \see http://www.sgi.com/tech/stl/not_equal_to.html
659
- * \see binary_function
660
- */
661
- template<typename T = void>
662
- struct not_equal_to
663
- {
664
- /*! \typedef first_argument_type
665
- * \brief The type of the function object's first argument.
666
- */
667
- typedef T first_argument_type;
668
-
669
- /*! \typedef second_argument_type
670
- * \brief The type of the function object's second argument.
671
- */
672
- typedef T second_argument_type;
673
-
674
- /*! \typedef result_type
675
- * \brief The type of the function object's result;
676
- */
677
- typedef bool result_type;
678
-
679
- /*! Function call operator. The return value is <tt>lhs != rhs</tt>.
680
- */
681
- __thrust_exec_check_disable__
682
- __host__ __device__
683
- constexpr bool operator()(const T &lhs, const T &rhs) const
684
- {
685
- return lhs != rhs;
686
- }
687
- }; // end not_equal_to
688
-
689
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(not_equal_to, !=);
690
-
691
- /*! \p greater is a function object. Specifically, it is an Adaptable Binary
692
- * Predicate, which means it is a function object that tests the truth or falsehood
693
- * of some condition. If \c f is an object of class <tt>greater<T></tt> and \c x
694
- * and \c y are objects of class \c T, then <tt>f(x,y)</tt> returns \c true if
695
- * <tt>x > y</tt> and \c false otherwise.
696
- *
697
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
698
- *
699
- * \see http://www.sgi.com/tech/stl/greater.html
700
- * \see binary_function
701
- */
702
- template<typename T = void>
703
- struct greater
704
- {
705
- /*! \typedef first_argument_type
706
- * \brief The type of the function object's first argument.
707
- */
708
- typedef T first_argument_type;
709
-
710
- /*! \typedef second_argument_type
711
- * \brief The type of the function object's second argument.
712
- */
713
- typedef T second_argument_type;
714
-
715
- /*! \typedef result_type
716
- * \brief The type of the function object's result;
717
- */
718
- typedef bool result_type;
719
-
720
- /*! Function call operator. The return value is <tt>lhs > rhs</tt>.
721
- */
722
- __thrust_exec_check_disable__
723
- __host__ __device__
724
- constexpr bool operator()(const T &lhs, const T &rhs) const
725
- {
726
- return lhs > rhs;
727
- }
728
- }; // end greater
729
-
730
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(greater, >);
731
-
732
- /*! \p less is a function object. Specifically, it is an Adaptable Binary
733
- * Predicate, which means it is a function object that tests the truth or falsehood
734
- * of some condition. If \c f is an object of class <tt>less<T></tt> and \c x
735
- * and \c y are objects of class \c T, then <tt>f(x,y)</tt> returns \c true if
736
- * <tt>x < y</tt> and \c false otherwise.
737
- *
738
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
739
- *
740
- * \see http://www.sgi.com/tech/stl/less.html
741
- * \see binary_function
742
- */
743
- template<typename T = void>
744
- struct less
745
- {
746
- /*! \typedef first_argument_type
747
- * \brief The type of the function object's first argument.
748
- */
749
- typedef T first_argument_type;
750
-
751
- /*! \typedef second_argument_type
752
- * \brief The type of the function object's second argument.
753
- */
754
- typedef T second_argument_type;
755
-
756
- /*! \typedef result_type
757
- * \brief The type of the function object's result;
758
- */
759
- typedef bool result_type;
760
-
761
- /*! Function call operator. The return value is <tt>lhs < rhs</tt>.
762
- */
763
- __thrust_exec_check_disable__
764
- __host__ __device__
765
- constexpr bool operator()(const T &lhs, const T &rhs) const
766
- {
767
- return lhs < rhs;
768
- }
769
- }; // end less
770
-
771
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(less, <);
772
-
773
- /*! \p greater_equal is a function object. Specifically, it is an Adaptable Binary
774
- * Predicate, which means it is a function object that tests the truth or falsehood
775
- * of some condition. If \c f is an object of class <tt>greater_equal<T></tt> and \c x
776
- * and \c y are objects of class \c T, then <tt>f(x,y)</tt> returns \c true if
777
- * <tt>x >= y</tt> and \c false otherwise.
778
- *
779
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
780
- *
781
- * \see http://www.sgi.com/tech/stl/greater_equal.html
782
- * \see binary_function
783
- */
784
- template<typename T = void>
785
- struct greater_equal
786
- {
787
- /*! \typedef first_argument_type
788
- * \brief The type of the function object's first argument.
789
- */
790
- typedef T first_argument_type;
791
-
792
- /*! \typedef second_argument_type
793
- * \brief The type of the function object's second argument.
794
- */
795
- typedef T second_argument_type;
796
-
797
- /*! \typedef result_type
798
- * \brief The type of the function object's result;
799
- */
800
- typedef bool result_type;
801
-
802
- /*! Function call operator. The return value is <tt>lhs >= rhs</tt>.
803
- */
804
- __thrust_exec_check_disable__
805
- __host__ __device__
806
- constexpr bool operator()(const T &lhs, const T &rhs) const
807
- {
808
- return lhs >= rhs;
809
- }
810
- }; // end greater_equal
811
-
812
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(greater_equal, >=);
813
-
814
- /*! \p less_equal is a function object. Specifically, it is an Adaptable Binary
815
- * Predicate, which means it is a function object that tests the truth or falsehood
816
- * of some condition. If \c f is an object of class <tt>less_equal<T></tt> and \c x
817
- * and \c y are objects of class \c T, then <tt>f(x,y)</tt> returns \c true if
818
- * <tt>x <= y</tt> and \c false otherwise.
819
- *
820
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
821
- *
822
- * \see http://www.sgi.com/tech/stl/less_equal.html
823
- * \see binary_function
824
- */
825
- template<typename T = void>
826
- struct less_equal
827
- {
828
- /*! \typedef first_argument_type
829
- * \brief The type of the function object's first argument.
830
- */
831
- typedef T first_argument_type;
832
-
833
- /*! \typedef second_argument_type
834
- * \brief The type of the function object's second argument.
835
- */
836
- typedef T second_argument_type;
837
-
838
- /*! \typedef result_type
839
- * \brief The type of the function object's result;
840
- */
841
- typedef bool result_type;
842
-
843
- /*! Function call operator. The return value is <tt>lhs <= rhs</tt>.
844
- */
845
- __thrust_exec_check_disable__
846
- __host__ __device__
847
- constexpr bool operator()(const T &lhs, const T &rhs) const
848
- {
849
- return lhs <= rhs;
850
- }
851
- }; // end less_equal
852
-
853
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(less_equal, <=);
854
-
855
- /*! \}
856
- */
857
-
858
-
859
- /*! \addtogroup logical_operations Logical Operations
860
- * \ingroup predefined_function_objects
861
- * \{
862
- */
863
-
864
- /*! \p logical_and is a function object. Specifically, it is an Adaptable Binary Predicate,
865
- * which means it is a function object that tests the truth or falsehood of some condition.
866
- * If \c f is an object of class <tt>logical_and<T></tt> and \c x and \c y are objects of
867
- * class \c T (where \c T is convertible to \c bool) then <tt>f(x,y)</tt> returns \c true
868
- * if and only if both \c x and \c y are \c true.
869
- *
870
- * \tparam T must be convertible to \c bool.
871
- *
872
- * \see http://www.sgi.com/tech/stl/logical_and.html
873
- * \see binary_function
874
- */
875
- template<typename T = void>
876
- struct logical_and
877
- {
878
- /*! \typedef first_argument_type
879
- * \brief The type of the function object's first argument.
880
- */
881
- typedef T first_argument_type;
882
-
883
- /*! \typedef second_argument_type
884
- * \brief The type of the function object's second argument.
885
- */
886
- typedef T second_argument_type;
887
-
888
- /*! \typedef result_type
889
- * \brief The type of the function object's result;
890
- */
891
- typedef bool result_type;
892
-
893
- /*! Function call operator. The return value is <tt>lhs && rhs</tt>.
894
- */
895
- __thrust_exec_check_disable__
896
- __host__ __device__
897
- constexpr bool operator()(const T &lhs, const T &rhs) const
898
- {
899
- return lhs && rhs;
900
- }
901
- }; // end logical_and
902
-
903
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(logical_and, &&);
904
-
905
- /*! \p logical_or is a function object. Specifically, it is an Adaptable Binary Predicate,
906
- * which means it is a function object that tests the truth or falsehood of some condition.
907
- * If \c f is an object of class <tt>logical_or<T></tt> and \c x and \c y are objects of
908
- * class \c T (where \c T is convertible to \c bool) then <tt>f(x,y)</tt> returns \c true
909
- * if and only if either \c x or \c y are \c true.
910
- *
911
- * \tparam T must be convertible to \c bool.
912
- *
913
- * \see http://www.sgi.com/tech/stl/logical_or.html
914
- * \see binary_function
915
- */
916
- template<typename T = void>
917
- struct logical_or
918
- {
919
- /*! \typedef first_argument_type
920
- * \brief The type of the function object's first argument.
921
- */
922
- typedef T first_argument_type;
923
-
924
- /*! \typedef second_argument_type
925
- * \brief The type of the function object's second argument.
926
- */
927
- typedef T second_argument_type;
928
-
929
- /*! \typedef result_type
930
- * \brief The type of the function object's result;
931
- */
932
- typedef bool result_type;
933
-
934
- /*! Function call operator. The return value is <tt>lhs || rhs</tt>.
935
- */
936
- __thrust_exec_check_disable__
937
- __host__ __device__
938
- constexpr bool operator()(const T &lhs, const T &rhs) const
939
- {
940
- return lhs || rhs;
941
- }
942
- }; // end logical_or
943
-
944
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(logical_or, ||);
945
-
946
- /*! \p logical_not is a function object. Specifically, it is an Adaptable Predicate,
947
- * which means it is a function object that tests the truth or falsehood of some condition.
948
- * If \c f is an object of class <tt>logical_not<T></tt> and \c x is an object of
949
- * class \c T (where \c T is convertible to \c bool) then <tt>f(x)</tt> returns \c true
950
- * if and only if \c x is \c false.
951
- *
952
- * \tparam T must be convertible to \c bool.
953
- *
954
- * The following code snippet demonstrates how to use \p logical_not to transform
955
- * a device_vector of \c bools into its logical complement.
956
- *
957
- * \code
958
- * #include <thrust/device_vector.h>
959
- * #include <thrust/transform.h>
960
- * #include <thrust/functional.h>
961
- * ...
962
- * thrust::device_vector<bool> V;
963
- * ...
964
- * thrust::transform(V.begin(), V.end(), V.begin(), thrust::logical_not<bool>());
965
- * // The elements of V are now the logical complement of what they were prior
966
- * \endcode
967
- *
968
- * \see http://www.sgi.com/tech/stl/logical_not.html
969
- * \see unary_function
970
- */
971
- template<typename T = void>
972
- struct logical_not
973
- {
974
- /*! \typedef first_argument_type
975
- * \brief The type of the function object's first argument.
976
- */
977
- typedef T first_argument_type;
978
-
979
- /*! \typedef second_argument_type
980
- * \brief The type of the function object's second argument.
981
- */
982
- typedef T second_argument_type;
983
-
984
- /*! \typedef result_type
985
- * \brief The type of the function object's result;
986
- */
987
- typedef bool result_type;
988
-
989
- /*! Function call operator. The return value is <tt>!x</tt>.
990
- */
991
- __thrust_exec_check_disable__
992
- __host__ __device__
993
- constexpr bool operator()(const T &x) const
994
- {
995
- return !x;
996
- }
997
- }; // end logical_not
998
-
999
- THRUST_UNARY_FUNCTOR_VOID_SPECIALIZATION(logical_not, !THRUST_FWD(x));
1000
-
1001
- /*! \}
1002
- */
1003
-
1004
- /*! \addtogroup bitwise_operations Bitwise Operations
1005
- * \ingroup predefined_function_objects
1006
- * \{
1007
- */
1008
-
1009
- /*! \p bit_and is a function object. Specifically, it is an Adaptable Binary Function.
1010
- * If \c f is an object of class <tt>bit_and<T></tt>, and \c x and \c y are objects
1011
- * of class \c T, then <tt>f(x,y)</tt> returns <tt>x&y</tt>.
1012
- *
1013
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
1014
- * and if \c x and \c y are objects of type \p T, then <tt>x&y</tt> must be defined and must have a return type that is convertible to \c T.
1015
- *
1016
- * The following code snippet demonstrates how to use <tt>bit_and</tt> to take
1017
- * the bitwise AND of one device_vector of \c ints with another.
1018
- *
1019
- * \code
1020
- * #include <thrust/device_vector.h>
1021
- * #include <thrust/functional.h>
1022
- * #include <thrust/sequence.h>
1023
- * #include <thrust/fill.h>
1024
- * #include <thrust/transform.h>
1025
- * ...
1026
- * const int N = 1000;
1027
- * thrust::device_vector<int> V1(N);
1028
- * thrust::device_vector<int> V2(N);
1029
- * thrust::device_vector<int> V3(N);
1030
- *
1031
- * thrust::sequence(V1.begin(), V1.end(), 1);
1032
- * thrust::fill(V2.begin(), V2.end(), 13);
1033
- *
1034
- * thrust::transform(V1.begin(), V1.end(), V2.begin(), V3.begin(),
1035
- * thrust::bit_and<int>());
1036
- * // V3 is now {1&13, 2&13, 3&13, ..., 1000&13}
1037
- * \endcode
1038
- *
1039
- * \see binary_function
1040
- */
1041
- template<typename T = void>
1042
- struct bit_and
1043
- {
1044
- /*! \typedef first_argument_type
1045
- * \brief The type of the function object's first argument.
1046
- */
1047
- typedef T first_argument_type;
1048
-
1049
- /*! \typedef second_argument_type
1050
- * \brief The type of the function object's second argument.
1051
- */
1052
- typedef T second_argument_type;
1053
-
1054
- /*! \typedef result_type
1055
- * \brief The type of the function object's result;
1056
- */
1057
- typedef T result_type;
1058
-
1059
- /*! Function call operator. The return value is <tt>lhs & rhs</tt>.
1060
- */
1061
- __thrust_exec_check_disable__
1062
- __host__ __device__
1063
- constexpr T operator()(const T &lhs, const T &rhs) const
1064
- {
1065
- return lhs & rhs;
1066
- }
1067
- }; // end bit_and
1068
-
1069
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(bit_and, &);
1070
-
1071
- /*! \p bit_or is a function object. Specifically, it is an Adaptable Binary Function.
1072
- * If \c f is an object of class <tt>bit_or<T></tt>, and \c x and \c y are objects
1073
- * of class \c T, then <tt>f(x,y)</tt> returns <tt>x|y</tt>.
1074
- *
1075
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
1076
- * and if \c x and \c y are objects of type \p T, then <tt>x|y</tt> must be defined and must have a return type that is convertible to \c T.
1077
- *
1078
- * The following code snippet demonstrates how to use <tt>bit_or</tt> to take
1079
- * the bitwise OR of one device_vector of \c ints with another.
1080
- *
1081
- * \code
1082
- * #include <thrust/device_vector.h>
1083
- * #include <thrust/functional.h>
1084
- * #include <thrust/sequence.h>
1085
- * #include <thrust/fill.h>
1086
- * #include <thrust/transform.h>
1087
- * ...
1088
- * const int N = 1000;
1089
- * thrust::device_vector<int> V1(N);
1090
- * thrust::device_vector<int> V2(N);
1091
- * thrust::device_vector<int> V3(N);
1092
- *
1093
- * thrust::sequence(V1.begin(), V1.end(), 1);
1094
- * thrust::fill(V2.begin(), V2.end(), 13);
1095
- *
1096
- * thrust::transform(V1.begin(), V1.end(), V2.begin(), V3.begin(),
1097
- * thrust::bit_or<int>());
1098
- * // V3 is now {1|13, 2|13, 3|13, ..., 1000|13}
1099
- * \endcode
1100
- *
1101
- * \see binary_function
1102
- */
1103
- template<typename T = void>
1104
- struct bit_or
1105
- {
1106
- /*! \typedef first_argument_type
1107
- * \brief The type of the function object's first argument.
1108
- */
1109
- typedef T first_argument_type;
1110
-
1111
- /*! \typedef second_argument_type
1112
- * \brief The type of the function object's second argument.
1113
- */
1114
- typedef T second_argument_type;
1115
-
1116
- /*! \typedef result_type
1117
- * \brief The type of the function object's result;
1118
- */
1119
- typedef T result_type;
1120
-
1121
- /*! Function call operator. The return value is <tt>lhs | rhs</tt>.
1122
- */
1123
- __thrust_exec_check_disable__
1124
- __host__ __device__
1125
- constexpr T operator()(const T &lhs, const T &rhs) const
1126
- {
1127
- return lhs | rhs;
1128
- }
1129
- }; // end bit_or
1130
-
1131
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(bit_or, |);
1132
-
1133
- /*! \p bit_xor is a function object. Specifically, it is an Adaptable Binary Function.
1134
- * If \c f is an object of class <tt>bit_xor<T></tt>, and \c x and \c y are objects
1135
- * of class \c T, then <tt>f(x,y)</tt> returns <tt>x^y</tt>.
1136
- *
1137
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
1138
- * and if \c x and \c y are objects of type \p T, then <tt>x^y</tt> must be defined and must have a return type that is convertible to \c T.
1139
- *
1140
- * The following code snippet demonstrates how to use <tt>bit_xor</tt> to take
1141
- * the bitwise XOR of one device_vector of \c ints with another.
1142
- *
1143
- * \code
1144
- * #include <thrust/device_vector.h>
1145
- * #include <thrust/functional.h>
1146
- * #include <thrust/sequence.h>
1147
- * #include <thrust/fill.h>
1148
- * #include <thrust/transform.h>
1149
- * ...
1150
- * const int N = 1000;
1151
- * thrust::device_vector<int> V1(N);
1152
- * thrust::device_vector<int> V2(N);
1153
- * thrust::device_vector<int> V3(N);
1154
- *
1155
- * thrust::sequence(V1.begin(), V1.end(), 1);
1156
- * thrust::fill(V2.begin(), V2.end(), 13);
1157
- *
1158
- * thrust::transform(V1.begin(), V1.end(), V2.begin(), V3.begin(),
1159
- * thrust::bit_xor<int>());
1160
- * // V3 is now {1^13, 2^13, 3^13, ..., 1000^13}
1161
- * \endcode
1162
- *
1163
- * \see binary_function
1164
- */
1165
- template<typename T = void>
1166
- struct bit_xor
1167
- {
1168
- /*! \typedef first_argument_type
1169
- * \brief The type of the function object's first argument.
1170
- */
1171
- typedef T first_argument_type;
1172
-
1173
- /*! \typedef second_argument_type
1174
- * \brief The type of the function object's second argument.
1175
- */
1176
- typedef T second_argument_type;
1177
-
1178
- /*! \typedef result_type
1179
- * \brief The type of the function object's result;
1180
- */
1181
- typedef T result_type;
1182
-
1183
- /*! Function call operator. The return value is <tt>lhs ^ rhs</tt>.
1184
- */
1185
- __thrust_exec_check_disable__
1186
- __host__ __device__
1187
- constexpr T operator()(const T &lhs, const T &rhs) const
1188
- {
1189
- return lhs ^ rhs;
1190
- }
1191
- }; // end bit_xor
1192
-
1193
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP(bit_xor, ^);
1194
-
1195
- /*! \}
1196
- */
1197
-
1198
- /*! \addtogroup generalized_identity_operations Generalized Identity Operations
1199
- * \ingroup predefined_function_objects
1200
- * \{
1201
- */
1202
-
1203
- /*! \p identity is a Unary Function that represents the identity function: it takes
1204
- * a single argument \c x, and returns \c x.
1205
- *
1206
- * \tparam T No requirements on \p T.
1207
- *
1208
- * The following code snippet demonstrates that \p identity returns its
1209
- * argument.
1210
- *
1211
- * \code
1212
- * #include <thrust/functional.h>
1213
- * #include <assert.h>
1214
- * ...
1215
- * int x = 137;
1216
- * thrust::identity<int> id;
1217
- * assert(x == id(x));
1218
- * \endcode
1219
- *
1220
- * \see http://www.sgi.com/tech/stl/identity.html
1221
- * \see unary_function
1222
- */
1223
- template<typename T = void>
1224
- struct identity
1225
- {
1226
- /*! \typedef argument_type
1227
- * \brief The type of the function object's first argument.
1228
- */
1229
- typedef T argument_type;
1230
-
1231
- /*! \typedef result_type
1232
- * \brief The type of the function object's result;
1233
- */
1234
- typedef T result_type;
1235
-
1236
- /*! Function call operator. The return value is <tt>x</tt>.
1237
- */
1238
- __thrust_exec_check_disable__
1239
- __host__ __device__
1240
- constexpr const T &operator()(const T &x) const
1241
- {
1242
- return x;
1243
- }
1244
- }; // end identity
1245
-
1246
- THRUST_UNARY_FUNCTOR_VOID_SPECIALIZATION(identity, THRUST_FWD(x));
1247
-
1248
- /*! \p maximum is a function object that takes two arguments and returns the greater
1249
- * of the two. Specifically, it is an Adaptable Binary Function. If \c f is an
1250
- * object of class <tt>maximum<T></tt> and \c x and \c y are objects of class \c T,
1251
- * <tt>f(x,y)</tt> returns \c x if <tt>x > y</tt> and \c y otherwise.
1252
- *
1253
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
1254
- *
1255
- * The following code snippet demonstrates that \p maximum returns its
1256
- * greater argument.
1257
- *
1258
- * \code
1259
- * #include <thrust/functional.h>
1260
- * #include <assert.h>
1261
- * ...
1262
- * int x = 137;
1263
- * int y = -137;
1264
- * thrust::maximum<int> mx;
1265
- * assert(x == mx(x,y));
1266
- * \endcode
1267
- *
1268
- * \see minimum
1269
- * \see min
1270
- * \see binary_function
1271
- */
1272
- template<typename T = void>
1273
- struct maximum
1274
- {
1275
- /*! \typedef first_argument_type
1276
- * \brief The type of the function object's first argument.
1277
- */
1278
- typedef T first_argument_type;
1279
-
1280
- /*! \typedef second_argument_type
1281
- * \brief The type of the function object's second argument.
1282
- */
1283
- typedef T second_argument_type;
1284
-
1285
- /*! \typedef result_type
1286
- * \brief The type of the function object's result;
1287
- */
1288
- typedef T result_type;
1289
-
1290
- /*! Function call operator. The return value is <tt>rhs < lhs ? lhs : rhs</tt>.
1291
- */
1292
- __thrust_exec_check_disable__
1293
- __host__ __device__
1294
- constexpr T operator()(const T &lhs, const T &rhs) const
1295
- {
1296
- return lhs < rhs ? rhs : lhs;
1297
- }
1298
- }; // end maximum
1299
-
1300
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION(maximum,
1301
- t1 < t2 ? THRUST_FWD(t2)
1302
- : THRUST_FWD(t1));
1303
-
1304
- /*! \p minimum is a function object that takes two arguments and returns the lesser
1305
- * of the two. Specifically, it is an Adaptable Binary Function. If \c f is an
1306
- * object of class <tt>minimum<T></tt> and \c x and \c y are objects of class \c T,
1307
- * <tt>f(x,y)</tt> returns \c x if <tt>x < y</tt> and \c y otherwise.
1308
- *
1309
- * \tparam T is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>.
1310
- *
1311
- * The following code snippet demonstrates that \p minimum returns its
1312
- * lesser argument.
1313
- *
1314
- * \code
1315
- * #include <thrust/functional.h>
1316
- * #include <assert.h>
1317
- * ...
1318
- * int x = 137;
1319
- * int y = -137;
1320
- * thrust::minimum<int> mn;
1321
- * assert(y == mn(x,y));
1322
- * \endcode
1323
- *
1324
- * \see maximum
1325
- * \see max
1326
- * \see binary_function
1327
- */
1328
- template<typename T = void>
1329
- struct minimum
1330
- {
1331
- /*! \typedef first_argument_type
1332
- * \brief The type of the function object's first argument.
1333
- */
1334
- typedef T first_argument_type;
1335
-
1336
- /*! \typedef second_argument_type
1337
- * \brief The type of the function object's second argument.
1338
- */
1339
- typedef T second_argument_type;
1340
-
1341
- /*! \typedef result_type
1342
- * \brief The type of the function object's result;
1343
- */
1344
- typedef T result_type;
1345
-
1346
- /*! Function call operator. The return value is <tt>lhs < rhs ? lhs : rhs</tt>.
1347
- */
1348
- __thrust_exec_check_disable__
1349
- __host__ __device__
1350
- constexpr T operator()(const T &lhs, const T &rhs) const
1351
- {
1352
- return lhs < rhs ? lhs : rhs;
1353
- }
1354
- }; // end minimum
1355
-
1356
- THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION(minimum,
1357
- t1 < t2 ? THRUST_FWD(t1)
1358
- : THRUST_FWD(t2));
1359
-
1360
- /*! \p project1st is a function object that takes two arguments and returns
1361
- * its first argument; the second argument is unused. It is essentially a
1362
- * generalization of identity to the case of a Binary Function.
1363
- *
1364
- * \code
1365
- * #include <thrust/functional.h>
1366
- * #include <assert.h>
1367
- * ...
1368
- * int x = 137;
1369
- * int y = -137;
1370
- * thrust::project1st<int> pj1;
1371
- * assert(x == pj1(x,y));
1372
- * \endcode
1373
- *
1374
- * \see identity
1375
- * \see project2nd
1376
- * \see binary_function
1377
- */
1378
- template<typename T1 = void, typename T2 = void>
1379
- struct project1st
1380
- {
1381
- /*! \typedef first_argument_type
1382
- * \brief The type of the function object's first argument.
1383
- */
1384
- typedef T1 first_argument_type;
1385
-
1386
- /*! \typedef second_argument_type
1387
- * \brief The type of the function object's second argument.
1388
- */
1389
- typedef T2 second_argument_type;
1390
-
1391
- /*! \typedef result_type
1392
- * \brief The type of the function object's result;
1393
- */
1394
- typedef T1 result_type;
1395
-
1396
- /*! Function call operator. The return value is <tt>lhs</tt>.
1397
- */
1398
- __host__ __device__
1399
- constexpr const T1 &operator()(const T1 &lhs, const T2 & /*rhs*/) const
1400
- {
1401
- return lhs;
1402
- }
1403
- }; // end project1st
1404
-
1405
- template <>
1406
- struct project1st<void, void>
1407
- {
1408
- using is_transparent = void;
1409
- __thrust_exec_check_disable__
1410
- template <typename T1, typename T2>
1411
- __host__ __device__
1412
- constexpr auto operator()(T1&& t1, T2&&) const
1413
- noexcept(noexcept(THRUST_FWD(t1))) -> decltype(THRUST_FWD(t1))
1414
- {
1415
- return THRUST_FWD(t1);
1416
- }
1417
- };
1418
-
1419
- /*! \p project2nd is a function object that takes two arguments and returns
1420
- * its second argument; the first argument is unused. It is essentially a
1421
- * generalization of identity to the case of a Binary Function.
1422
- *
1423
- * \code
1424
- * #include <thrust/functional.h>
1425
- * #include <assert.h>
1426
- * ...
1427
- * int x = 137;
1428
- * int y = -137;
1429
- * thrust::project2nd<int> pj2;
1430
- * assert(y == pj2(x,y));
1431
- * \endcode
1432
- *
1433
- * \see identity
1434
- * \see project1st
1435
- * \see binary_function
1436
- */
1437
- template<typename T1 = void, typename T2 = void>
1438
- struct project2nd
1439
- {
1440
- /*! \typedef first_argument_type
1441
- * \brief The type of the function object's first argument.
1442
- */
1443
- typedef T1 first_argument_type;
1444
-
1445
- /*! \typedef second_argument_type
1446
- * \brief The type of the function object's second argument.
1447
- */
1448
- typedef T2 second_argument_type;
1449
-
1450
- /*! \typedef result_type
1451
- * \brief The type of the function object's result;
1452
- */
1453
- typedef T2 result_type;
1454
-
1455
- /*! Function call operator. The return value is <tt>rhs</tt>.
1456
- */
1457
- __host__ __device__
1458
- constexpr const T2 &operator()(const T1 &/*lhs*/, const T2 &rhs) const
1459
- {
1460
- return rhs;
1461
- }
1462
- }; // end project2nd
1463
-
1464
- template <>
1465
- struct project2nd<void, void>
1466
- {
1467
- using is_transparent = void;
1468
- __thrust_exec_check_disable__
1469
- template <typename T1, typename T2>
1470
- __host__ __device__
1471
- constexpr auto operator()(T1&&, T2&& t2) const
1472
- noexcept(noexcept(THRUST_FWD(t2))) -> decltype(THRUST_FWD(t2))
1473
- {
1474
- return THRUST_FWD(t2);
1475
- }
1476
- };
1477
-
1478
- /*! \}
1479
- */
1480
-
1481
- // odds and ends
1482
-
1483
- /*! \addtogroup function_object_adaptors
1484
- * \{
1485
- */
1486
-
1487
- /*! \p unary_negate is a function object adaptor: it is an Adaptable Predicate
1488
- * that represents the logical negation of some other Adaptable Predicate.
1489
- * That is: if \c f is an object of class <tt>unary_negate<AdaptablePredicate></tt>,
1490
- * then there exists an object \c pred of class \c AdaptablePredicate such
1491
- * that <tt>f(x)</tt> always returns the same value as <tt>!pred(x)</tt>.
1492
- * There is rarely any reason to construct a <tt>unary_negate</tt> directly;
1493
- * it is almost always easier to use the helper function not1.
1494
- *
1495
- * \see http://www.sgi.com/tech/stl/unary_negate.html
1496
- * \see not1
1497
- */
1498
- template<typename Predicate>
1499
- struct unary_negate
1500
- : public thrust::unary_function<typename Predicate::argument_type, bool>
1501
- {
1502
- /*! Constructor takes a \p Predicate object to negate.
1503
- * \param p The \p Predicate object to negate.
1504
- */
1505
- __host__ __device__
1506
- explicit unary_negate(Predicate p) : pred(p){}
1507
-
1508
- /*! Function call operator. The return value is <tt>!pred(x)</tt>.
1509
- */
1510
- __thrust_exec_check_disable__
1511
- __host__ __device__
1512
- bool operator()(const typename Predicate::argument_type& x) { return !pred(x); }
1513
-
1514
- /*! \cond
1515
- */
1516
- Predicate pred;
1517
- /*! \endcond
1518
- */
1519
- }; // end unary_negate
1520
-
1521
- /*! \p not1 is a helper function to simplify the creation of Adaptable Predicates:
1522
- * it takes an Adaptable Predicate \p pred as an argument and returns a new Adaptable
1523
- * Predicate that represents the negation of \p pred. That is: if \c pred is an object
1524
- * of a type which models Adaptable Predicate, then the type of the result
1525
- * \c npred of <tt>not1(pred)</tt> is also a model of Adaptable Predicate and
1526
- * <tt>npred(x)</tt> always returns the same value as <tt>!pred(x)</tt>.
1527
- *
1528
- * \param pred The Adaptable Predicate to negate.
1529
- * \return A new object, <tt>npred</tt> such that <tt>npred(x)</tt> always returns
1530
- * the same value as <tt>!pred(x)</tt>.
1531
- *
1532
- * \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/AdaptablePredicate.html">Adaptable Predicate</a>.
1533
- *
1534
- * \see unary_negate
1535
- * \see not2
1536
- */
1537
- template<typename Predicate>
1538
- __host__ __device__
1539
- unary_negate<Predicate> not1(const Predicate &pred);
1540
-
1541
- /*! \p binary_negate is a function object adaptor: it is an Adaptable Binary
1542
- * Predicate that represents the logical negation of some other Adaptable
1543
- * Binary Predicate. That is: if \c f is an object of class <tt>binary_negate<AdaptableBinaryPredicate></tt>,
1544
- * then there exists an object \c pred of class \c AdaptableBinaryPredicate
1545
- * such that <tt>f(x,y)</tt> always returns the same value as <tt>!pred(x,y)</tt>.
1546
- * There is rarely any reason to construct a <tt>binary_negate</tt> directly;
1547
- * it is almost always easier to use the helper function not2.
1548
- *
1549
- * \see http://www.sgi.com/tech/stl/binary_negate.html
1550
- */
1551
- template<typename Predicate>
1552
- struct binary_negate
1553
- : public thrust::binary_function<typename Predicate::first_argument_type,
1554
- typename Predicate::second_argument_type,
1555
- bool>
1556
- {
1557
- /*! Constructor takes a \p Predicate object to negate.
1558
- * \param p The \p Predicate object to negate.
1559
- */
1560
- __host__ __device__
1561
- explicit binary_negate(Predicate p) : pred(p){}
1562
-
1563
- /*! Function call operator. The return value is <tt>!pred(x,y)</tt>.
1564
- */
1565
- __thrust_exec_check_disable__
1566
- __host__ __device__
1567
- bool operator()(const typename Predicate::first_argument_type& x, const typename Predicate::second_argument_type& y)
1568
- {
1569
- return !pred(x,y);
1570
- }
1571
-
1572
- /*! \cond
1573
- */
1574
- Predicate pred;
1575
- /*! \endcond
1576
- */
1577
- }; // end binary_negate
1578
-
1579
- /*! \p not2 is a helper function to simplify the creation of Adaptable Binary Predicates:
1580
- * it takes an Adaptable Binary Predicate \p pred as an argument and returns a new Adaptable
1581
- * Binary Predicate that represents the negation of \p pred. That is: if \c pred is an object
1582
- * of a type which models Adaptable Binary Predicate, then the type of the result
1583
- * \c npred of <tt>not2(pred)</tt> is also a model of Adaptable Binary Predicate and
1584
- * <tt>npred(x,y)</tt> always returns the same value as <tt>!pred(x,y)</tt>.
1585
- *
1586
- * \param pred The Adaptable Binary Predicate to negate.
1587
- * \return A new object, <tt>npred</tt> such that <tt>npred(x,y)</tt> always returns
1588
- * the same value as <tt>!pred(x,y)</tt>.
1589
- *
1590
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/AdaptableBinaryPredicate.html">Adaptable Binary Predicate</a>.
1591
- *
1592
- * \see binary_negate
1593
- * \see not1
1594
- */
1595
- template<typename BinaryPredicate>
1596
- __host__ __device__
1597
- binary_negate<BinaryPredicate> not2(const BinaryPredicate &pred);
1598
-
1599
- /*! \}
1600
- */
1601
-
1602
-
1603
- /*! \addtogroup placeholder_objects Placeholder Objects
1604
- * \ingroup function_objects
1605
- * \{
1606
- */
1607
-
1608
-
1609
- /*! \namespace thrust::placeholders
1610
- * \brief Facilities for constructing simple functions inline.
1611
- *
1612
- * Objects in the \p thrust::placeholders namespace may be used to create simple arithmetic functions inline
1613
- * in an algorithm invocation. Combining placeholders such as \p _1 and \p _2 with arithmetic operations such as \c +
1614
- * creates an unnamed function object which applies the operation to their arguments.
1615
- *
1616
- * The type of placeholder objects is implementation-defined.
1617
- *
1618
- * The following code snippet demonstrates how to use the placeholders \p _1 and \p _2 with \p thrust::transform
1619
- * to implement the SAXPY computation:
1620
- *
1621
- * \code
1622
- * #include <thrust/device_vector.h>
1623
- * #include <thrust/transform.h>
1624
- * #include <thrust/functional.h>
1625
- *
1626
- * int main()
1627
- * {
1628
- * thrust::device_vector<float> x(4), y(4);
1629
- * x[0] = 1;
1630
- * x[1] = 2;
1631
- * x[2] = 3;
1632
- * x[3] = 4;
1633
- *
1634
- * y[0] = 1;
1635
- * y[1] = 1;
1636
- * y[2] = 1;
1637
- * y[3] = 1;
1638
- *
1639
- * float a = 2.0f;
1640
- *
1641
- * using namespace thrust::placeholders;
1642
- *
1643
- * thrust::transform(x.begin(), x.end(), y.begin(), y.begin(),
1644
- * a * _1 + _2
1645
- * );
1646
- *
1647
- * // y is now {3, 5, 7, 9}
1648
- * }
1649
- * \endcode
1650
- */
1651
- namespace placeholders
1652
- {
1653
-
1654
-
1655
- /*! \p thrust::placeholders::_1 is the placeholder for the first function parameter.
1656
- */
1657
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<0>::type _1;
1658
-
1659
-
1660
- /*! \p thrust::placeholders::_2 is the placeholder for the second function parameter.
1661
- */
1662
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<1>::type _2;
1663
-
1664
-
1665
- /*! \p thrust::placeholders::_3 is the placeholder for the third function parameter.
1666
- */
1667
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<2>::type _3;
1668
-
1669
-
1670
- /*! \p thrust::placeholders::_4 is the placeholder for the fourth function parameter.
1671
- */
1672
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<3>::type _4;
1673
-
1674
-
1675
- /*! \p thrust::placeholders::_5 is the placeholder for the fifth function parameter.
1676
- */
1677
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<4>::type _5;
1678
-
1679
-
1680
- /*! \p thrust::placeholders::_6 is the placeholder for the sixth function parameter.
1681
- */
1682
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<5>::type _6;
1683
-
1684
-
1685
- /*! \p thrust::placeholders::_7 is the placeholder for the seventh function parameter.
1686
- */
1687
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<6>::type _7;
1688
-
1689
-
1690
- /*! \p thrust::placeholders::_8 is the placeholder for the eighth function parameter.
1691
- */
1692
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<7>::type _8;
1693
-
1694
-
1695
- /*! \p thrust::placeholders::_9 is the placeholder for the ninth function parameter.
1696
- */
1697
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<8>::type _9;
1698
-
1699
-
1700
- /*! \p thrust::placeholders::_10 is the placeholder for the tenth function parameter.
1701
- */
1702
- THRUST_INLINE_CONSTANT thrust::detail::functional::placeholder<9>::type _10;
1703
-
1704
-
1705
- } // end placeholders
1706
-
1707
-
1708
- /*! \} // placeholder_objects
1709
- */
1710
-
1711
- #undef THRUST_UNARY_FUNCTOR_VOID_SPECIALIZATION
1712
- #undef THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION
1713
- #undef THRUST_BINARY_FUNCTOR_VOID_SPECIALIZATION_OP
1714
-
1715
- } // end thrust
1716
-
1717
- #include <thrust/detail/functional.inl>
1718
- #include <thrust/detail/functional/operators.h>
1719
-
 
spaces/CVPR/WALT/mmdet/models/losses/ae_loss.py DELETED
@@ -1,102 +0,0 @@
1
- import mmcv
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
-
6
- from ..builder import LOSSES
7
-
8
-
9
- @mmcv.jit(derivate=True, coderize=True)
10
- def ae_loss_per_image(tl_preds, br_preds, match):
11
- """Associative Embedding Loss in one image.
12
-
13
- Associative Embedding Loss including two parts: pull loss and push loss.
14
- Pull loss makes embedding vectors from same object closer to each other.
15
- Push loss distinguish embedding vector from different objects, and makes
16
- the gap between them is large enough.
17
-
18
- During computing, usually there are 3 cases:
19
- - no object in image: both pull loss and push loss will be 0.
20
- - one object in image: push loss will be 0 and pull loss is computed
21
- by the two corner of the only object.
22
- - more than one objects in image: pull loss is computed by corner pairs
23
- from each object, push loss is computed by each object with all
24
- other objects. We use confusion matrix with 0 in diagonal to
25
- compute the push loss.
26
-
27
- Args:
28
- tl_preds (tensor): Embedding feature map of left-top corner.
29
- br_preds (tensor): Embedding feature map of bottim-right corner.
30
- match (list): Downsampled coordinates pair of each ground truth box.
31
- """
32
-
33
- tl_list, br_list, me_list = [], [], []
34
- if len(match) == 0: # no object in image
35
- pull_loss = tl_preds.sum() * 0.
36
- push_loss = tl_preds.sum() * 0.
37
- else:
38
- for m in match:
39
- [tl_y, tl_x], [br_y, br_x] = m
40
- tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
41
- br_e = br_preds[:, br_y, br_x].view(-1, 1)
42
- tl_list.append(tl_e)
43
- br_list.append(br_e)
44
- me_list.append((tl_e + br_e) / 2.0)
45
-
46
- tl_list = torch.cat(tl_list)
47
- br_list = torch.cat(br_list)
48
- me_list = torch.cat(me_list)
49
-
50
- assert tl_list.size() == br_list.size()
51
-
52
- # N is object number in image, M is dimension of embedding vector
53
- N, M = tl_list.size()
54
-
55
- pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
56
- pull_loss = pull_loss.sum() / N
57
-
58
- margin = 1 # exp setting of CornerNet, details in section 3.3 of paper
59
-
60
- # confusion matrix of push loss
61
- conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
62
- conf_weight = 1 - torch.eye(N).type_as(me_list)
63
- conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
64
-
65
- if N > 1: # more than one object in current image
66
- push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
67
- else:
68
- push_loss = tl_preds.sum() * 0.
69
-
70
- return pull_loss, push_loss
71
-
72
-
73
- @LOSSES.register_module()
74
- class AssociativeEmbeddingLoss(nn.Module):
75
- """Associative Embedding Loss.
76
-
77
- More details can be found in
78
- `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
79
- `CornerNet <https://arxiv.org/abs/1808.01244>`_ .
80
- Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501
81
-
82
- Args:
83
- pull_weight (float): Loss weight for corners from same object.
84
- push_weight (float): Loss weight for corners from different object.
85
- """
86
-
87
- def __init__(self, pull_weight=0.25, push_weight=0.25):
88
- super(AssociativeEmbeddingLoss, self).__init__()
89
- self.pull_weight = pull_weight
90
- self.push_weight = push_weight
91
-
92
- def forward(self, pred, target, match):
93
- """Forward function."""
94
- batch = pred.size(0)
95
- pull_all, push_all = 0.0, 0.0
96
- for i in range(batch):
97
- pull, push = ae_loss_per_image(pred[i], target[i], match[i])
98
-
99
- pull_all += self.pull_weight * pull
100
- push_all += self.push_weight * push
101
-
102
- return pull_all, push_all
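As a quick reference, here is a minimal sketch of how the deleted AssociativeEmbeddingLoss above could be exercised with dummy tensors. It is illustrative only: the import path mmdet.models.losses.ae_loss is an assumption based on the file location, and the embedding dimension is taken as 1, as in CornerNet-style corner embeddings.

import torch
from mmdet.models.losses.ae_loss import AssociativeEmbeddingLoss  # assumed import path

# Batch of 2 images, embedding dim 1, 4x4 corner feature maps.
tl_emb = torch.rand(2, 1, 4, 4)  # top-left corner embeddings
br_emb = torch.rand(2, 1, 4, 4)  # bottom-right corner embeddings

# One ground-truth box per image, as downsampled ((tl_y, tl_x), (br_y, br_x)) pairs.
match = [[((0, 0), (2, 2))], [((1, 1), (3, 3))]]

loss = AssociativeEmbeddingLoss(pull_weight=0.25, push_weight=0.25)
pull, push = loss(tl_emb, br_emb, match)  # weighted pull/push losses summed over the batch
print(pull.item(), push.item())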
 
spaces/ChihChiu29/mychatbot/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: Mychatbot
3
- emoji: 📉
4
- colorFrom: blue
5
- colorTo: blue
6
- sdk: docker
7
- pinned: false
8
- license: gpl-2.0
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/ChrisPreston/diff-svc_minato_aqua/modules/nsf_hifigan/utils.py DELETED
@@ -1,69 +0,0 @@
1
- import glob
2
- import os
3
-
4
- import matplotlib
5
- import torch
6
- from torch.nn.utils import weight_norm
7
-
8
- matplotlib.use("Agg")
9
- import matplotlib.pylab as plt
10
-
11
-
12
- def plot_spectrogram(spectrogram):
13
- fig, ax = plt.subplots(figsize=(10, 2))
14
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
15
- interpolation='none')
16
- plt.colorbar(im, ax=ax)
17
-
18
- fig.canvas.draw()
19
- plt.close()
20
-
21
- return fig
22
-
23
-
24
- def init_weights(m, mean=0.0, std=0.01):
25
- classname = m.__class__.__name__
26
- if classname.find("Conv") != -1:
27
- m.weight.data.normal_(mean, std)
28
-
29
-
30
- def apply_weight_norm(m):
31
- classname = m.__class__.__name__
32
- if classname.find("Conv") != -1:
33
- weight_norm(m)
34
-
35
-
36
- def get_padding(kernel_size, dilation=1):
37
- return int((kernel_size * dilation - dilation) / 2)
38
-
39
-
40
- def load_checkpoint(filepath, device):
41
- assert os.path.isfile(filepath)
42
- print("Loading '{}'".format(filepath))
43
- checkpoint_dict = torch.load(filepath, map_location=device)
44
- print("Complete.")
45
- return checkpoint_dict
46
-
47
-
48
- def save_checkpoint(filepath, obj):
49
- print("Saving checkpoint to {}".format(filepath))
50
- torch.save(obj, filepath)
51
- print("Complete.")
52
-
53
-
54
- def del_old_checkpoints(cp_dir, prefix, n_models=2):
55
- pattern = os.path.join(cp_dir, prefix + '????????')
56
- cp_list = glob.glob(pattern) # get checkpoint paths
57
- cp_list = sorted(cp_list) # sort by iter
58
- if len(cp_list) > n_models: # if more than n_models models are found
59
- for cp in cp_list[:-n_models]: # delete the oldest models other than the latest n_models
60
- open(cp, 'w').close() # empty file contents
61
- os.unlink(cp) # delete file (move to trash when using Colab)
62
-
63
-
64
- def scan_checkpoint(cp_dir, prefix):
65
- pattern = os.path.join(cp_dir, prefix + '????????')
66
- cp_list = glob.glob(pattern)
67
- if len(cp_list) == 0:
68
- return None
69
- return sorted(cp_list)[-1]
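For context, a hypothetical sketch of how the checkpoint helpers defined above fit together during training. The import path and directory layout are assumptions: the module path mirrors the file location, and the filename format follows the 8-character pattern used by scan_checkpoint.

import os
import torch
from modules.nsf_hifigan.utils import (  # assumed import path
    scan_checkpoint, load_checkpoint, save_checkpoint, del_old_checkpoints)

cp_dir, prefix = "checkpoints", "g_"  # assumed layout: checkpoints/g_00012345

# Resume from the newest matching checkpoint, if any.
latest = scan_checkpoint(cp_dir, prefix)
if latest is not None:
    state = load_checkpoint(latest, torch.device("cpu"))

# Later, save the current step and prune everything but the two newest checkpoints.
step = 12345
save_checkpoint(os.path.join(cp_dir, f"{prefix}{step:08d}"), {"step": step})
del_old_checkpoints(cp_dir, prefix, n_models=2)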
 
spaces/DGSpitzer/DGS-Diffusion-Space/share_btn.py DELETED
@@ -1,72 +0,0 @@
1
- community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
2
- <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
3
- <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
4
- </svg>"""
5
-
6
- loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
7
- style="color: #ffffff;
8
- "
9
- xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
10
-
11
- share_js = """async () => {
12
- async function uploadFile(file){
13
- const UPLOAD_URL = 'https://huggingface.co/uploads';
14
- const response = await fetch(UPLOAD_URL, {
15
- method: 'POST',
16
- headers: {
17
- 'Content-Type': file.type,
18
- 'X-Requested-With': 'XMLHttpRequest',
19
- },
20
- body: file, /// <- File inherits from Blob
21
- });
22
- const url = await response.text();
23
- return url;
24
- }
25
- async function getOutputImgFile(imgEl){
26
- const res = await fetch(imgEl.src);
27
- const blob = await res.blob();
28
- const imgId = Date.now() % 200;
29
- const isPng = imgEl.src.startsWith(`data:image/png`);
30
- if(isPng){
31
- const fileName = `sd-perception-${{imgId}}.png`;
32
- return new File([blob], fileName, { type: 'image/png' });
33
- }else{
34
- const fileName = `sd-perception-${{imgId}}.jpg`;
35
- return new File([blob], fileName, { type: 'image/jpeg' });
36
- }
37
- }
38
- const gradioEl = document.querySelector('body > gradio-app');
39
- const outputImgEl = gradioEl.querySelector('#output-img img');
40
-
41
- //const outputImg_src = outputImgEl.src;
42
- //const outputImg_name = outputImg_src.split('/').pop();
43
- //let titleTxt = outputImg_name;
44
-
45
- let titleTxt = gradioEl.querySelector('#input-prompt > label > textarea').value;
46
- if(titleTxt.length > 100){
47
- titleTxt = titleTxt.slice(0, 100) + ' ...';
48
- }
49
- const shareBtnEl = gradioEl.querySelector('#share-btn');
50
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
51
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
52
- shareBtnEl.style.pointerEvents = 'none';
53
- shareIconEl.style.display = 'none';
54
- loadingIconEl.style.removeProperty('display');
55
- const outputFile= await getOutputImgFile(outputImgEl);
56
- const urlOutputImg = await uploadFile(outputFile);
57
-
58
- const descriptionMd = `#### Output Image:
59
- <img src='${urlOutputImg}' style='min-height: 704px;'>
60
- This is my cool character! Made with Cyberpunk Anime Diffusion AI!
61
- `;
62
- const params = new URLSearchParams({
63
- title: titleTxt,
64
- description: descriptionMd,
65
- });
66
- const paramsStr = params.toString();
67
- window.open(`https://huggingface.co/spaces/DGSpitzer/DGS-Diffusion-Space/discussions/new?${paramsStr}`, '_blank');
68
- shareBtnEl.style.removeProperty('pointer-events');
69
- shareIconEl.style.removeProperty('display');
70
- loadingIconEl.style.display = 'none';
71
- console.log(gradioEl.querySelector('#input-prompt'));
72
- }"""
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_core/_synchronization.py DELETED
@@ -1,596 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from collections import deque
4
- from dataclasses import dataclass
5
- from types import TracebackType
6
- from warnings import warn
7
-
8
- from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled
9
- from ._compat import DeprecatedAwaitable
10
- from ._eventloop import get_asynclib
11
- from ._exceptions import BusyResourceError, WouldBlock
12
- from ._tasks import CancelScope
13
- from ._testing import TaskInfo, get_current_task
14
-
15
-
16
- @dataclass(frozen=True)
17
- class EventStatistics:
18
- """
19
- :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
20
- """
21
-
22
- tasks_waiting: int
23
-
24
-
25
- @dataclass(frozen=True)
26
- class CapacityLimiterStatistics:
27
- """
28
- :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
29
- :ivar float total_tokens: total number of available tokens
30
- :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from this
31
- limiter
32
- :ivar int tasks_waiting: number of tasks waiting on :meth:`~.CapacityLimiter.acquire` or
33
- :meth:`~.CapacityLimiter.acquire_on_behalf_of`
34
- """
35
-
36
- borrowed_tokens: int
37
- total_tokens: float
38
- borrowers: tuple[object, ...]
39
- tasks_waiting: int
40
-
41
-
42
- @dataclass(frozen=True)
43
- class LockStatistics:
44
- """
45
- :ivar bool locked: flag indicating if this lock is locked or not
46
- :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the lock is not
47
- held by any task)
48
- :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
49
- """
50
-
51
- locked: bool
52
- owner: TaskInfo | None
53
- tasks_waiting: int
54
-
55
-
56
- @dataclass(frozen=True)
57
- class ConditionStatistics:
58
- """
59
- :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
60
- :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying :class:`~.Lock`
61
- """
62
-
63
- tasks_waiting: int
64
- lock_statistics: LockStatistics
65
-
66
-
67
- @dataclass(frozen=True)
68
- class SemaphoreStatistics:
69
- """
70
- :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
71
-
72
- """
73
-
74
- tasks_waiting: int
75
-
76
-
77
- class Event:
78
- def __new__(cls) -> Event:
79
- return get_asynclib().Event()
80
-
81
- def set(self) -> DeprecatedAwaitable:
82
- """Set the flag, notifying all listeners."""
83
- raise NotImplementedError
84
-
85
- def is_set(self) -> bool:
86
- """Return ``True`` if the flag is set, ``False`` if not."""
87
- raise NotImplementedError
88
-
89
- async def wait(self) -> None:
90
- """
91
- Wait until the flag has been set.
92
-
93
- If the flag has already been set when this method is called, it returns immediately.
94
-
95
- """
96
- raise NotImplementedError
97
-
98
- def statistics(self) -> EventStatistics:
99
- """Return statistics about the current state of this event."""
100
- raise NotImplementedError
101
-
102
-
103
- class Lock:
104
- _owner_task: TaskInfo | None = None
105
-
106
- def __init__(self) -> None:
107
- self._waiters: deque[tuple[TaskInfo, Event]] = deque()
108
-
109
- async def __aenter__(self) -> None:
110
- await self.acquire()
111
-
112
- async def __aexit__(
113
- self,
114
- exc_type: type[BaseException] | None,
115
- exc_val: BaseException | None,
116
- exc_tb: TracebackType | None,
117
- ) -> None:
118
- self.release()
119
-
120
- async def acquire(self) -> None:
121
- """Acquire the lock."""
122
- await checkpoint_if_cancelled()
123
- try:
124
- self.acquire_nowait()
125
- except WouldBlock:
126
- task = get_current_task()
127
- event = Event()
128
- token = task, event
129
- self._waiters.append(token)
130
- try:
131
- await event.wait()
132
- except BaseException:
133
- if not event.is_set():
134
- self._waiters.remove(token)
135
- elif self._owner_task == task:
136
- self.release()
137
-
138
- raise
139
-
140
- assert self._owner_task == task
141
- else:
142
- try:
143
- await cancel_shielded_checkpoint()
144
- except BaseException:
145
- self.release()
146
- raise
147
-
148
- def acquire_nowait(self) -> None:
149
- """
150
- Acquire the lock, without blocking.
151
-
152
- :raises ~anyio.WouldBlock: if the operation would block
153
-
154
- """
155
- task = get_current_task()
156
- if self._owner_task == task:
157
- raise RuntimeError("Attempted to acquire an already held Lock")
158
-
159
- if self._owner_task is not None:
160
- raise WouldBlock
161
-
162
- self._owner_task = task
163
-
164
- def release(self) -> DeprecatedAwaitable:
165
- """Release the lock."""
166
- if self._owner_task != get_current_task():
167
- raise RuntimeError("The current task is not holding this lock")
168
-
169
- if self._waiters:
170
- self._owner_task, event = self._waiters.popleft()
171
- event.set()
172
- else:
173
- del self._owner_task
174
-
175
- return DeprecatedAwaitable(self.release)
176
-
177
- def locked(self) -> bool:
178
- """Return True if the lock is currently held."""
179
- return self._owner_task is not None
180
-
181
- def statistics(self) -> LockStatistics:
182
- """
183
- Return statistics about the current state of this lock.
184
-
185
- .. versionadded:: 3.0
186
- """
187
- return LockStatistics(self.locked(), self._owner_task, len(self._waiters))
188
-
189
-
190
- class Condition:
191
- _owner_task: TaskInfo | None = None
192
-
193
- def __init__(self, lock: Lock | None = None):
194
- self._lock = lock or Lock()
195
- self._waiters: deque[Event] = deque()
196
-
197
- async def __aenter__(self) -> None:
198
- await self.acquire()
199
-
200
- async def __aexit__(
201
- self,
202
- exc_type: type[BaseException] | None,
203
- exc_val: BaseException | None,
204
- exc_tb: TracebackType | None,
205
- ) -> None:
206
- self.release()
207
-
208
- def _check_acquired(self) -> None:
209
- if self._owner_task != get_current_task():
210
- raise RuntimeError("The current task is not holding the underlying lock")
211
-
212
- async def acquire(self) -> None:
213
- """Acquire the underlying lock."""
214
- await self._lock.acquire()
215
- self._owner_task = get_current_task()
216
-
217
- def acquire_nowait(self) -> None:
218
- """
219
- Acquire the underlying lock, without blocking.
220
-
221
- :raises ~anyio.WouldBlock: if the operation would block
222
-
223
- """
224
- self._lock.acquire_nowait()
225
- self._owner_task = get_current_task()
226
-
227
- def release(self) -> DeprecatedAwaitable:
228
- """Release the underlying lock."""
229
- self._lock.release()
230
- return DeprecatedAwaitable(self.release)
231
-
232
- def locked(self) -> bool:
233
- """Return True if the lock is set."""
234
- return self._lock.locked()
235
-
236
- def notify(self, n: int = 1) -> None:
237
- """Notify exactly n listeners."""
238
- self._check_acquired()
239
- for _ in range(n):
240
- try:
241
- event = self._waiters.popleft()
242
- except IndexError:
243
- break
244
-
245
- event.set()
246
-
247
- def notify_all(self) -> None:
248
- """Notify all the listeners."""
249
- self._check_acquired()
250
- for event in self._waiters:
251
- event.set()
252
-
253
- self._waiters.clear()
254
-
255
- async def wait(self) -> None:
256
- """Wait for a notification."""
257
- await checkpoint()
258
- event = Event()
259
- self._waiters.append(event)
260
- self.release()
261
- try:
262
- await event.wait()
263
- except BaseException:
264
- if not event.is_set():
265
- self._waiters.remove(event)
266
-
267
- raise
268
- finally:
269
- with CancelScope(shield=True):
270
- await self.acquire()
271
-
272
- def statistics(self) -> ConditionStatistics:
273
- """
274
- Return statistics about the current state of this condition.
275
-
276
- .. versionadded:: 3.0
277
- """
278
- return ConditionStatistics(len(self._waiters), self._lock.statistics())
279
-
280
-
281
- class Semaphore:
282
- def __init__(self, initial_value: int, *, max_value: int | None = None):
283
- if not isinstance(initial_value, int):
284
- raise TypeError("initial_value must be an integer")
285
- if initial_value < 0:
286
- raise ValueError("initial_value must be >= 0")
287
- if max_value is not None:
288
- if not isinstance(max_value, int):
289
- raise TypeError("max_value must be an integer or None")
290
- if max_value < initial_value:
291
- raise ValueError(
292
- "max_value must be equal to or higher than initial_value"
293
- )
294
-
295
- self._value = initial_value
296
- self._max_value = max_value
297
- self._waiters: deque[Event] = deque()
298
-
299
- async def __aenter__(self) -> Semaphore:
300
- await self.acquire()
301
- return self
302
-
303
- async def __aexit__(
304
- self,
305
- exc_type: type[BaseException] | None,
306
- exc_val: BaseException | None,
307
- exc_tb: TracebackType | None,
308
- ) -> None:
309
- self.release()
310
-
311
- async def acquire(self) -> None:
312
- """Decrement the semaphore value, blocking if necessary."""
313
- await checkpoint_if_cancelled()
314
- try:
315
- self.acquire_nowait()
316
- except WouldBlock:
317
- event = Event()
318
- self._waiters.append(event)
319
- try:
320
- await event.wait()
321
- except BaseException:
322
- if not event.is_set():
323
- self._waiters.remove(event)
324
- else:
325
- self.release()
326
-
327
- raise
328
- else:
329
- try:
-                 await cancel_shielded_checkpoint()
-             except BaseException:
-                 self.release()
-                 raise
-
-     def acquire_nowait(self) -> None:
-         """
-         Acquire the underlying lock, without blocking.
-
-         :raises ~anyio.WouldBlock: if the operation would block
-
-         """
-         if self._value == 0:
-             raise WouldBlock
-
-         self._value -= 1
-
-     def release(self) -> DeprecatedAwaitable:
-         """Increment the semaphore value."""
-         if self._max_value is not None and self._value == self._max_value:
-             raise ValueError("semaphore released too many times")
-
-         if self._waiters:
-             self._waiters.popleft().set()
-         else:
-             self._value += 1
-
-         return DeprecatedAwaitable(self.release)
-
-     @property
-     def value(self) -> int:
-         """The current value of the semaphore."""
-         return self._value
-
-     @property
-     def max_value(self) -> int | None:
-         """The maximum value of the semaphore."""
-         return self._max_value
-
-     def statistics(self) -> SemaphoreStatistics:
-         """
-         Return statistics about the current state of this semaphore.
-
-         .. versionadded:: 3.0
-         """
-         return SemaphoreStatistics(len(self._waiters))
-
-
- class CapacityLimiter:
-     def __new__(cls, total_tokens: float) -> CapacityLimiter:
-         return get_asynclib().CapacityLimiter(total_tokens)
-
-     async def __aenter__(self) -> None:
-         raise NotImplementedError
-
-     async def __aexit__(
-         self,
-         exc_type: type[BaseException] | None,
-         exc_val: BaseException | None,
-         exc_tb: TracebackType | None,
-     ) -> bool | None:
-         raise NotImplementedError
-
-     @property
-     def total_tokens(self) -> float:
-         """
-         The total number of tokens available for borrowing.
-
-         This is a read-write property. If the total number of tokens is increased, the
-         proportionate number of tasks waiting on this limiter will be granted their tokens.
-
-         .. versionchanged:: 3.0
-             The property is now writable.
-
-         """
-         raise NotImplementedError
-
-     @total_tokens.setter
-     def total_tokens(self, value: float) -> None:
-         raise NotImplementedError
-
-     async def set_total_tokens(self, value: float) -> None:
-         warn(
-             "CapacityLimiter.set_total_tokens has been deprecated. Set the value of the"
-             '"total_tokens" attribute directly.',
-             DeprecationWarning,
-         )
-         self.total_tokens = value
-
-     @property
-     def borrowed_tokens(self) -> int:
-         """The number of tokens that have currently been borrowed."""
-         raise NotImplementedError
-
-     @property
-     def available_tokens(self) -> float:
-         """The number of tokens currently available to be borrowed"""
-         raise NotImplementedError
-
-     def acquire_nowait(self) -> DeprecatedAwaitable:
-         """
-         Acquire a token for the current task without waiting for one to become available.
-
-         :raises ~anyio.WouldBlock: if there are no tokens available for borrowing
-
-         """
-         raise NotImplementedError
-
-     def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
-         """
-         Acquire a token without waiting for one to become available.
-
-         :param borrower: the entity borrowing a token
-         :raises ~anyio.WouldBlock: if there are no tokens available for borrowing
-
-         """
-         raise NotImplementedError
-
-     async def acquire(self) -> None:
-         """
-         Acquire a token for the current task, waiting if necessary for one to become available.
-
-         """
-         raise NotImplementedError
-
-     async def acquire_on_behalf_of(self, borrower: object) -> None:
-         """
-         Acquire a token, waiting if necessary for one to become available.
-
-         :param borrower: the entity borrowing a token
-
-         """
-         raise NotImplementedError
-
-     def release(self) -> None:
-         """
-         Release the token held by the current task.
-         :raises RuntimeError: if the current task has not borrowed a token from this limiter.
-
-         """
-         raise NotImplementedError
-
-     def release_on_behalf_of(self, borrower: object) -> None:
-         """
-         Release the token held by the given borrower.
-
-         :raises RuntimeError: if the borrower has not borrowed a token from this limiter.
-
-         """
-         raise NotImplementedError
-
-     def statistics(self) -> CapacityLimiterStatistics:
-         """
-         Return statistics about the current state of this limiter.
-
-         .. versionadded:: 3.0
-
-         """
-         raise NotImplementedError
-
-
- def create_lock() -> Lock:
-     """
-     Create an asynchronous lock.
-
-     :return: a lock object
-
-     .. deprecated:: 3.0
-         Use :class:`~Lock` directly.
-
-     """
-     warn("create_lock() is deprecated -- use Lock() directly", DeprecationWarning)
-     return Lock()
-
-
- def create_condition(lock: Lock | None = None) -> Condition:
-     """
-     Create an asynchronous condition.
-
-     :param lock: the lock to base the condition object on
-     :return: a condition object
-
-     .. deprecated:: 3.0
-         Use :class:`~Condition` directly.
-
-     """
-     warn(
-         "create_condition() is deprecated -- use Condition() directly",
-         DeprecationWarning,
-     )
-     return Condition(lock=lock)
-
-
- def create_event() -> Event:
-     """
-     Create an asynchronous event object.
-
-     :return: an event object
-
-     .. deprecated:: 3.0
-         Use :class:`~Event` directly.
-
-     """
-     warn("create_event() is deprecated -- use Event() directly", DeprecationWarning)
-     return get_asynclib().Event()
-
-
- def create_semaphore(value: int, *, max_value: int | None = None) -> Semaphore:
-     """
-     Create an asynchronous semaphore.
-
-     :param value: the semaphore's initial value
-     :param max_value: if set, makes this a "bounded" semaphore that raises :exc:`ValueError` if the
-         semaphore's value would exceed this number
-     :return: a semaphore object
-
-     .. deprecated:: 3.0
-         Use :class:`~Semaphore` directly.
-
-     """
-     warn(
-         "create_semaphore() is deprecated -- use Semaphore() directly",
-         DeprecationWarning,
-     )
-     return Semaphore(value, max_value=max_value)
-
-
- def create_capacity_limiter(total_tokens: float) -> CapacityLimiter:
-     """
-     Create a capacity limiter.
-
-     :param total_tokens: the total number of tokens available for borrowing (can be an integer or
-         :data:`math.inf`)
-     :return: a capacity limiter object
-
-     .. deprecated:: 3.0
-         Use :class:`~CapacityLimiter` directly.
-
-     """
-     warn(
-         "create_capacity_limiter() is deprecated -- use CapacityLimiter() directly",
-         DeprecationWarning,
-     )
-     return get_asynclib().CapacityLimiter(total_tokens)
-
-
- class ResourceGuard:
-     __slots__ = "action", "_guarded"
-
-     def __init__(self, action: str):
-         self.action = action
-         self._guarded = False
-
-     def __enter__(self) -> None:
-         if self._guarded:
-             raise BusyResourceError(self.action)
-
-         self._guarded = True
-
-     def __exit__(
-         self,
-         exc_type: type[BaseException] | None,
-         exc_val: BaseException | None,
-         exc_tb: TracebackType | None,
-     ) -> bool | None:
-         self._guarded = False
-         return None
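Aside (not part of the diff): the deleted file above appears to be anyio's synchronization-primitives module vendored inside the Space's virtualenv. The following is a minimal sketch of how the Semaphore and CapacityLimiter APIs it defines are normally driven through the public anyio package, assuming anyio 3.x is installed; the coroutine name `worker` and the task labels are made up for illustration.

import anyio


async def worker(name: str, limiter: anyio.CapacityLimiter) -> None:
    # At most `limiter.total_tokens` workers sit inside this block at once.
    async with limiter:
        await anyio.sleep(0.1)
        print(f"{name} finished; borrowed={limiter.borrowed_tokens}")


async def main() -> None:
    # Bounded semaphore: releasing past max_value raises ValueError, as in release() above.
    sem = anyio.Semaphore(1, max_value=1)
    async with sem:
        pass  # acquire()/release() happen via the async context manager

    limiter = anyio.CapacityLimiter(2)  # two tokens available for borrowing
    async with anyio.create_task_group() as tg:
        for i in range(5):
            tg.start_soon(worker, f"task-{i}", limiter)


anyio.run(main)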
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dateutil/tz/_factories.py DELETED
@@ -1,80 +0,0 @@
- from datetime import timedelta
- import weakref
- from collections import OrderedDict
-
- from six.moves import _thread
-
-
- class _TzSingleton(type):
-     def __init__(cls, *args, **kwargs):
-         cls.__instance = None
-         super(_TzSingleton, cls).__init__(*args, **kwargs)
-
-     def __call__(cls):
-         if cls.__instance is None:
-             cls.__instance = super(_TzSingleton, cls).__call__()
-         return cls.__instance
-
-
- class _TzFactory(type):
-     def instance(cls, *args, **kwargs):
-         """Alternate constructor that returns a fresh instance"""
-         return type.__call__(cls, *args, **kwargs)
-
-
- class _TzOffsetFactory(_TzFactory):
-     def __init__(cls, *args, **kwargs):
-         cls.__instances = weakref.WeakValueDictionary()
-         cls.__strong_cache = OrderedDict()
-         cls.__strong_cache_size = 8
-
-         cls._cache_lock = _thread.allocate_lock()
-
-     def __call__(cls, name, offset):
-         if isinstance(offset, timedelta):
-             key = (name, offset.total_seconds())
-         else:
-             key = (name, offset)
-
-         instance = cls.__instances.get(key, None)
-         if instance is None:
-             instance = cls.__instances.setdefault(key,
-                                                   cls.instance(name, offset))
-
-         # This lock may not be necessary in Python 3. See GH issue #901
-         with cls._cache_lock:
-             cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)
-
-             # Remove an item if the strong cache is overpopulated
-             if len(cls.__strong_cache) > cls.__strong_cache_size:
-                 cls.__strong_cache.popitem(last=False)
-
-         return instance
-
-
- class _TzStrFactory(_TzFactory):
-     def __init__(cls, *args, **kwargs):
-         cls.__instances = weakref.WeakValueDictionary()
-         cls.__strong_cache = OrderedDict()
-         cls.__strong_cache_size = 8
-
-         cls.__cache_lock = _thread.allocate_lock()
-
-     def __call__(cls, s, posix_offset=False):
-         key = (s, posix_offset)
-         instance = cls.__instances.get(key, None)
-
-         if instance is None:
-             instance = cls.__instances.setdefault(key,
-                                                   cls.instance(s, posix_offset))
-
-         # This lock may not be necessary in Python 3. See GH issue #901
-         with cls.__cache_lock:
-             cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)
-
-             # Remove an item if the strong cache is overpopulated
-             if len(cls.__strong_cache) > cls.__strong_cache_size:
-                 cls.__strong_cache.popitem(last=False)
-
-         return instance
-
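Aside (not part of the diff): these factory metaclasses appear to back dateutil's public `tzoffset`, `tzstr`, and `tzutc` classes, so constructing a zone twice with an equal cache key is expected to return the same cached object, while the `.instance()` alternate constructor bypasses the cache. A minimal sketch, assuming python-dateutil is installed; the zone name "EST" and offsets are arbitrary example values.

from datetime import timedelta

from dateutil import tz

a = tz.tzoffset("EST", timedelta(hours=-5))
b = tz.tzoffset("EST", -5 * 3600)      # same cache key: (name, offset in seconds)
print(a is b)                          # expected True: served from the weak/strong cache

print(tz.tzutc() is tz.tzutc())        # expected True: _TzSingleton keeps one shared instance

fresh = tz.tzoffset.instance("EST", timedelta(hours=-5))
print(fresh is a)                      # expected False: .instance() always builds a new object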
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/feaLib/error.py DELETED
@@ -1,22 +0,0 @@
- class FeatureLibError(Exception):
-     def __init__(self, message, location):
-         Exception.__init__(self, message)
-         self.location = location
-
-     def __str__(self):
-         message = Exception.__str__(self)
-         if self.location:
-             return f"{self.location}: {message}"
-         else:
-             return message
-
-
- class IncludedFeaNotFound(FeatureLibError):
-     def __str__(self):
-         assert self.location is not None
-
-         message = (
-             "The following feature file should be included but cannot be found: "
-             f"{Exception.__str__(self)}"
-         )
-         return f"{self.location}: {message}"
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-ecdf43f2.js DELETED
@@ -1,2 +0,0 @@
- import{S as Q,e as Z,s as W,J as Y,K as g,p as L,M as C,n as G,A as M,Z as me,N as z,O as U,m as ge,Q as q,z as N,u as ne,v as R,y as te,a1 as we,B as be,G as x,L as H,af as ve,ao as oe,V as ke,P as ie,U as E,R as se,h as P,j as ee,k as I,o as K,ap as ue,t as le,x as V,am as Se,E as Ee,ae as Ne,q as ye,r as Re,F as X}from"./index-1d65707a.js";/* empty css */import{b as fe,B as Te}from"./Button-f155035a.js";import{B as ze}from"./BlockTitle-dee077e8.js";import"./Info-7c6961ef.js";function Je(n){let e,l;return{c(){e=Y("svg"),l=Y("path"),g(l,"d","M5 8l4 4 4-4z"),g(e,"class","dropdown-arrow svelte-p5edak"),g(e,"xmlns","http://www.w3.org/2000/svg"),g(e,"width","18"),g(e,"height","18"),g(e,"viewBox","0 0 18 18")},m(i,o){L(i,e,o),C(e,l)},p:G,i:G,o:G,d(i){i&&M(e)}}}class Le extends Q{constructor(e){super(),Z(this,e,null,Je,W,{})}}function Me(n){let e,l;return{c(){e=Y("svg"),l=Y("path"),g(l,"d","M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12z"),g(e,"xmlns","http://www.w3.org/2000/svg"),g(e,"width","16"),g(e,"height","16"),g(e,"viewBox","0 0 24 24")},m(i,o){L(i,e,o),C(e,l)},p:G,i:G,o:G,d(i){i&&M(e)}}}class Oe extends Q{constructor(e){super(),Z(this,e,null,Me,W,{})}}function ae(n,e,l){const i=n.slice();return i[24]=e[l],i}function re(n){let e,l,i,o,d,t=x(n[0]),u=[];for(let s=0;s<t.length;s+=1)u[s]=_e(ae(n,t,s));return{c(){e=z("ul");for(let s=0;s<u.length;s+=1)u[s].c();g(e,"class","options svelte-1aonegi"),g(e,"aria-expanded",n[1]),H(e,"top",n[7]),H(e,"bottom",n[8]),H(e,"max-height",`calc(${n[9]}px - var(--window-padding))`),H(e,"width",n[6]+"px")},m(s,f){L(s,e,f);for(let a=0;a<u.length;a+=1)u[a]&&u[a].m(e,null);n[21](e),i=!0,o||(d=q(e,"mousedown",ve(n[20])),o=!0)},p(s,f){if(f&2053){t=x(s[0]);let a;for(a=0;a<t.length;a+=1){const m=ae(s,t,a);u[a]?u[a].p(m,f):(u[a]=_e(m),u[a].c(),u[a].m(e,null))}for(;a<u.length;a+=1)u[a].d(1);u.length=t.length}(!i||f&2)&&g(e,"aria-expanded",s[1]),f&128&&H(e,"top",s[7]),f&256&&H(e,"bottom",s[8]),f&512&&H(e,"max-height",`calc(${s[9]}px - var(--window-padding))`),f&64&&H(e,"width",s[6]+"px")},i(s){i||(s&&me(()=>{i&&(l||(l=oe(e,fe,{duration:200,y:5},!0)),l.run(1))}),i=!0)},o(s){s&&(l||(l=oe(e,fe,{duration:200,y:5},!1)),l.run(0)),i=!1},d(s){s&&M(e),ke(u,s),n[21](null),s&&l&&l.end(),o=!1,d()}}}function _e(n){let e,l,i,o=n[24]+"",d,t,u,s;return{c(){e=z("li"),l=z("span"),l.textContent="✓",i=U(),d=ie(o),t=U(),g(l,"class","inner-item svelte-1aonegi"),E(l,"hide",!n[11].includes(n[24])),g(e,"class","item svelte-1aonegi"),g(e,"role","button"),g(e,"data-value",u=n[24]),g(e,"aria-label",s=n[24]),E(e,"selected",n[11].includes(n[24])),E(e,"active",n[2]===n[24]),E(e,"bg-gray-100",n[2]===n[24]),E(e,"dark:bg-gray-600",n[2]===n[24])},m(f,a){L(f,e,a),C(e,l),C(e,i),C(e,d),C(e,t)},p(f,a){a&2049&&E(l,"hide",!f[11].includes(f[24])),a&1&&o!==(o=f[24]+"")&&se(d,o),a&1&&u!==(u=f[24])&&g(e,"data-value",u),a&1&&s!==(s=f[24])&&g(e,"aria-label",s),a&2049&&E(e,"selected",f[11].includes(f[24])),a&5&&E(e,"active",f[2]===f[24]),a&5&&E(e,"bg-gray-100",f[2]===f[24]),a&5&&E(e,"dark:bg-gray-600",f[2]===f[24])},d(f){f&&M(e)}}}function Ue(n){let e,l,i,o,d;me(n[18]);let 
t=n[1]&&!n[3]&&re(n);return{c(){e=z("div"),l=U(),t&&t.c(),i=ge(),g(e,"class","reference")},m(u,s){L(u,e,s),n[19](e),L(u,l,s),t&&t.m(u,s),L(u,i,s),o||(d=[q(window,"scroll",n[12]),q(window,"resize",n[18])],o=!0)},p(u,[s]){u[1]&&!u[3]?t?(t.p(u,s),s&10&&N(t,1)):(t=re(u),t.c(),N(t,1),t.m(i.parentNode,i)):t&&(ne(),R(t,1,1,()=>{t=null}),te())},i(u){N(t)},o(u){R(t)},d(u){u&&(M(e),M(l),M(i)),n[19](null),t&&t.d(u),o=!1,we(d)}}}function qe(n,e,l){let i,{value:o=void 0}=e,{filtered:d}=e,{showOptions:t=!1}=e,{activeOption:u}=e,{disabled:s=!1}=e,f,a,m,_,w,A,b,B,v,k;const S=()=>{const{top:O,bottom:F}=w.getBoundingClientRect();l(15,f=O),l(16,a=k-F)};let y=null;const D=()=>{t&&(y!==null&&clearTimeout(y),y=setTimeout(()=>{S(),y=null},10))},j=be();function J(){l(10,k=window.innerHeight)}function h(O){P[O?"unshift":"push"](()=>{w=O,l(4,w)})}const p=O=>j("change",O);function r(O){P[O?"unshift":"push"](()=>{A=O,l(5,A)})}return n.$$set=O=>{"value"in O&&l(14,o=O.value),"filtered"in O&&l(0,d=O.filtered),"showOptions"in O&&l(1,t=O.showOptions),"activeOption"in O&&l(2,u=O.activeOption),"disabled"in O&&l(3,s=O.disabled)},n.$$.update=()=>{if(n.$$.dirty&245810){if(t&&w){if(A&&typeof o=="string"){let F=document.querySelector(`li[data-value="${o}"]`);F&&A.scrollTo(0,F.offsetTop)}S();const O=w.parentElement?.getBoundingClientRect();l(17,m=O?.height||0),l(6,_=O?.width||0)}a>f?(l(7,b=`${f}px`),l(9,v=a),l(8,B=null)):(l(8,B=`${a+m}px`),l(9,v=f-m),l(7,b=null))}n.$$.dirty&16384&&l(11,i=Array.isArray(o)?o:[o])},[d,t,u,s,w,A,_,b,B,v,k,i,D,j,o,f,a,m,J,h,p,r]}class je extends Q{constructor(e){super(),Z(this,e,qe,Ue,W,{value:14,filtered:0,showOptions:1,activeOption:2,disabled:3})}}function ce(n,e,l){const i=n.slice();return i[31]=e[l],i}function He(n){let e;return{c(){e=ie(n[1])},m(l,i){L(l,e,i)},p(l,i){i[0]&2&&se(e,l[1])},d(l){l&&M(e)}}}function de(n){let e,l,i=x(n[0]),o=[];for(let t=0;t<i.length;t+=1)o[t]=he(ce(n,i,t));const d=t=>R(o[t],1,1,()=>{o[t]=null});return{c(){for(let t=0;t<o.length;t+=1)o[t].c();e=ge()},m(t,u){for(let s=0;s<o.length;s+=1)o[s]&&o[s].m(t,u);L(t,e,u),l=!0},p(t,u){if(u[0]&8209){i=x(t[0]);let s;for(s=0;s<i.length;s+=1){const f=ce(t,i,s);o[s]?(o[s].p(f,u),N(o[s],1)):(o[s]=he(f),o[s].c(),N(o[s],1),o[s].m(e.parentNode,e))}for(ne(),s=i.length;s<o.length;s+=1)d(s);te()}},i(t){if(!l){for(let u=0;u<i.length;u+=1)N(o[u]);l=!0}},o(t){o=o.filter(Boolean);for(let u=0;u<o.length;u+=1)R(o[u]);l=!1},d(t){t&&M(e),ke(o,t)}}}function he(n){let e,l,i=n[31]+"",o,d,t,u,s,f,a,m,_;u=new Oe({});function w(){return n[21](n[31])}return{c(){e=z("div"),l=z("span"),o=ie(i),d=U(),t=z("div"),I(u.$$.fragment),f=U(),g(l,"class","svelte-c0u3f0"),g(t,"class","token-remove svelte-c0u3f0"),g(t,"title",s="Remove "+n[31]),E(t,"hidden",n[4]),g(e,"class","token svelte-c0u3f0")},m(A,b){L(A,e,b),C(e,l),C(l,o),C(e,d),C(e,t),K(u,t,null),C(e,f),a=!0,m||(_=q(e,"click",ve(w)),m=!0)},p(A,b){n=A,(!a||b[0]&1)&&i!==(i=n[31]+"")&&se(o,i),(!a||b[0]&1&&s!==(s="Remove "+n[31]))&&g(t,"title",s),(!a||b[0]&16)&&E(t,"hidden",n[4])},i(A){a||(N(u.$$.fragment,A),a=!0)},o(A){R(u.$$.fragment,A),a=!1},d(A){A&&M(e),V(u),m=!1,_()}}}function Ie(n){let e,l,i,o,d,t=n[3]&&Array.isArray(n[0]),u,s,f,a,m,_,w,A,b,B,v,k,S,y;l=new ze({props:{show_label:n[5],info:n[2],$$slots:{default:[He]},$$scope:{ctx:n}}});let D=t&&de(n);_=new Oe({}),A=new Le({});function j(h){n[27](h)}let J={showOptions:n[11],filtered:n[10],activeOption:n[9],disabled:n[4]};return n[0]!==void 0&&(J.value=n[0]),B=new 
je({props:J}),P.push(()=>ee(B,"value",j)),B.$on("change",n[15]),{c(){e=z("label"),I(l.$$.fragment),i=U(),o=z("div"),d=z("div"),D&&D.c(),u=U(),s=z("div"),f=z("input"),a=U(),m=z("div"),I(_.$$.fragment),w=U(),I(A.$$.fragment),b=U(),I(B.$$.fragment),g(f,"class","border-none svelte-c0u3f0"),f.disabled=n[4],g(f,"autocomplete","off"),E(f,"subdued",n[0]!==n[8]&&!n[7]),g(m,"class","token-remove remove-all svelte-c0u3f0"),g(m,"title","Clear"),E(m,"hide",!n[3]||!n[0]?.length||n[4]),g(s,"class","secondary-wrap svelte-c0u3f0"),g(d,"class","wrap-inner svelte-c0u3f0"),E(d,"showOptions",n[11]),g(o,"class","wrap svelte-c0u3f0"),g(e,"class","svelte-c0u3f0"),E(e,"container",n[6])},m(h,p){L(h,e,p),K(l,e,null),C(e,i),C(e,o),C(o,d),D&&D.m(d,null),C(d,u),C(d,s),C(s,f),ue(f,n[8]),n[23](f),C(s,a),C(s,m),K(_,m,null),C(s,w),K(A,s,null),C(o,b),K(B,o,null),k=!0,S||(y=[q(f,"input",n[22]),q(f,"focus",n[24]),q(f,"keydown",n[16]),q(f,"keyup",n[25]),q(f,"blur",n[26]),q(m,"click",n[14])],S=!0)},p(h,p){const r={};p[0]&32&&(r.show_label=h[5]),p[0]&4&&(r.info=h[2]),p[0]&2|p[1]&8&&(r.$$scope={dirty:p,ctx:h}),l.$set(r),p[0]&9&&(t=h[3]&&Array.isArray(h[0])),t?D?(D.p(h,p),p[0]&9&&N(D,1)):(D=de(h),D.c(),N(D,1),D.m(d,u)):D&&(ne(),R(D,1,1,()=>{D=null}),te()),(!k||p[0]&16)&&(f.disabled=h[4]),p[0]&256&&f.value!==h[8]&&ue(f,h[8]),(!k||p[0]&385)&&E(f,"subdued",h[0]!==h[8]&&!h[7]),(!k||p[0]&25)&&E(m,"hide",!h[3]||!h[0]?.length||h[4]),(!k||p[0]&2048)&&E(d,"showOptions",h[11]);const O={};p[0]&2048&&(O.showOptions=h[11]),p[0]&1024&&(O.filtered=h[10]),p[0]&512&&(O.activeOption=h[9]),p[0]&16&&(O.disabled=h[4]),!v&&p[0]&1&&(v=!0,O.value=h[0],le(()=>v=!1)),B.$set(O),(!k||p[0]&64)&&E(e,"container",h[6])},i(h){k||(N(l.$$.fragment,h),N(D),N(_.$$.fragment,h),N(A.$$.fragment,h),N(B.$$.fragment,h),k=!0)},o(h){R(l.$$.fragment,h),R(D),R(_.$$.fragment,h),R(A.$$.fragment,h),R(B.$$.fragment,h),k=!1},d(h){h&&M(e),V(l),D&&D.d(),n[23](null),V(_),V(A),V(B),S=!1,we(y)}}}function Ke(n,e,l){let i,{label:o}=e,{info:d=void 0}=e,{value:t}=e,u=Array.isArray(t)?t.slice():t,{value_is_output:s=!1}=e,{multiselect:f=!1}=e,{max_choices:a}=e,{choices:m}=e,{disabled:_=!1}=e,{show_label:w}=e,{container:A=!0}=e,{allow_custom_value:b=!1}=e;const B=be();let v,k,S=!1,y;function D(){B("change",t),s||B("input")}Se(()=>{l(17,s=!1)});function j(c){l(0,t),(!a||t.length<a)&&(t.push(c),B("select",{index:m.indexOf(c),value:c,selected:!0})),l(0,t)}function J(c){l(0,t),l(0,t=t.filter(T=>T!==c)),B("select",{index:m.indexOf(c),value:c,selected:!1})}function h(c){l(0,t=[]),l(8,v=""),c.preventDefault()}function p(c){const T=c.detail.target.dataset.value;if(b&&l(8,v=T),T!==void 0)if(f)t?.includes(T)?J(T):j(T),l(8,v="");else{l(0,t=T),l(8,v=T),l(11,S=!1),B("select",{index:m.indexOf(T),value:T,selected:!0});return}}function r(c){if(c.key==="Enter"&&k!=null)f?f&&Array.isArray(t)&&(t.includes(k)?J(k):j(k),l(8,v="")):(t!==k&&(l(0,t=k),B("select",{index:m.indexOf(t),value:t,selected:!0})),l(8,v=k),l(11,S=!1));else if(l(11,S=!0),c.key==="ArrowUp"||c.key==="ArrowDown"){k===null&&l(9,k=i[0]);const T=c.key==="ArrowUp"?-1:1,$=i.indexOf(k)+T;l(9,k=$<0?i[i.length-1]:$===i.length?i[0]:i[$]),c.preventDefault()}else c.key==="Escape"?l(11,S=!1):c.key==="Backspace"?f&&(!v||v==="")&&Array.isArray(t)&&t.length>0&&(J(t[t.length-1]),l(8,v="")):l(11,S=!0)}const O=c=>J(c);function F(){v=this.value,l(8,v),l(0,t)}function Ae(c){P[c?"unshift":"push"](()=>{y=c,l(12,y)})}const pe=()=>{l(11,S=!S),S?l(8,v=""):y.blur()},Be=()=>{b&&l(0,t=v)},De=()=>{f?l(8,v=""):b||t!==v&&(typeof t=="string"&&v==""?l(8,v=t):(l(0,t=void 
0),l(8,v=""))),l(11,S=!1)};function Ce(c){t=c,l(0,t)}return n.$$set=c=>{"label"in c&&l(1,o=c.label),"info"in c&&l(2,d=c.info),"value"in c&&l(0,t=c.value),"value_is_output"in c&&l(17,s=c.value_is_output),"multiselect"in c&&l(3,f=c.multiselect),"max_choices"in c&&l(18,a=c.max_choices),"choices"in c&&l(19,m=c.choices),"disabled"in c&&l(4,_=c.disabled),"show_label"in c&&l(5,w=c.show_label),"container"in c&&l(6,A=c.container),"allow_custom_value"in c&&l(7,b=c.allow_custom_value)},n.$$.update=()=>{n.$$.dirty[0]&1&&(typeof t=="string"||t===null)&&l(8,v=t),n.$$.dirty[0]&524544&&l(10,i=m.filter(c=>v?c.toLowerCase().includes(v.toLowerCase()):c)),n.$$.dirty[0]&1536&&(!k||!i.includes(k))&&l(9,k=i.length?i[0]:null),n.$$.dirty[0]&1048577&&JSON.stringify(t)!=JSON.stringify(u)&&(l(20,u=Array.isArray(t)?t.slice():t),D()),n.$$.dirty[0]&1048577&&JSON.stringify(t)!=JSON.stringify(u)&&(B("change",t),l(20,u=Array.isArray(t)?t.slice():t))},[t,o,d,f,_,w,A,b,v,k,i,S,y,J,h,p,r,s,a,m,u,O,F,Ae,pe,Be,De,Ce]}class Ve extends Q{constructor(e){super(),Z(this,e,Ke,Ie,W,{label:1,info:2,value:0,value_is_output:17,multiselect:3,max_choices:18,choices:19,disabled:4,show_label:5,container:6,allow_custom_value:7},null,[-1,-1])}}function Fe(n){let e,l,i,o,d,t;const u=[n[14]];let s={};for(let _=0;_<u.length;_+=1)s=Ee(s,u[_]);e=new Ne({props:s});function f(_){n[17](_)}function a(_){n[18](_)}let m={choices:n[9],multiselect:n[7],max_choices:n[8],label:n[2],info:n[3],show_label:n[10],allow_custom_value:n[15],container:n[11],disabled:n[16]==="static"};return n[0]!==void 0&&(m.value=n[0]),n[1]!==void 0&&(m.value_is_output=n[1]),i=new Ve({props:m}),P.push(()=>ee(i,"value",f)),P.push(()=>ee(i,"value_is_output",a)),i.$on("change",n[19]),i.$on("input",n[20]),i.$on("select",n[21]),i.$on("blur",n[22]),{c(){I(e.$$.fragment),l=U(),I(i.$$.fragment)},m(_,w){K(e,_,w),L(_,l,w),K(i,_,w),t=!0},p(_,w){const A=w&16384?ye(u,[Re(_[14])]):{};e.$set(A);const b={};w&512&&(b.choices=_[9]),w&128&&(b.multiselect=_[7]),w&256&&(b.max_choices=_[8]),w&4&&(b.label=_[2]),w&8&&(b.info=_[3]),w&1024&&(b.show_label=_[10]),w&32768&&(b.allow_custom_value=_[15]),w&2048&&(b.container=_[11]),w&65536&&(b.disabled=_[16]==="static"),!o&&w&1&&(o=!0,b.value=_[0],le(()=>o=!1)),!d&&w&2&&(d=!0,b.value_is_output=_[1],le(()=>d=!1)),i.$set(b)},i(_){t||(N(e.$$.fragment,_),N(i.$$.fragment,_),t=!0)},o(_){R(e.$$.fragment,_),R(i.$$.fragment,_),t=!1},d(_){_&&M(l),V(e,_),V(i,_)}}}function Ge(n){let e,l;return e=new Te({props:{visible:n[6],elem_id:n[4],elem_classes:n[5],padding:n[11],allow_overflow:!1,scale:n[12],min_width:n[13],$$slots:{default:[Fe]},$$scope:{ctx:n}}}),{c(){I(e.$$.fragment)},m(i,o){K(e,i,o),l=!0},p(i,[o]){const d={};o&64&&(d.visible=i[6]),o&16&&(d.elem_id=i[4]),o&32&&(d.elem_classes=i[5]),o&2048&&(d.padding=i[11]),o&4096&&(d.scale=i[12]),o&8192&&(d.min_width=i[13]),o&8507279&&(d.$$scope={dirty:o,ctx:i}),e.$set(d)},i(i){l||(N(e.$$.fragment,i),l=!0)},o(i){R(e.$$.fragment,i),l=!1},d(i){V(e,i)}}}function Pe(n,e,l){let{label:i="Dropdown"}=e,{info:o=void 0}=e,{elem_id:d=""}=e,{elem_classes:t=[]}=e,{visible:u=!0}=e,{value:s}=e,{value_is_output:f=!1}=e,{multiselect:a=!1}=e,{max_choices:m}=e,{choices:_}=e,{show_label:w}=e,{container:A=!0}=e,{scale:b=null}=e,{min_width:B=void 0}=e,{loading_status:v}=e,{allow_custom_value:k=!1}=e,{mode:S}=e;a&&!s?s=[]:s||(s="");function y(r){s=r,l(0,s)}function D(r){f=r,l(1,f)}function j(r){X.call(this,n,r)}function J(r){X.call(this,n,r)}function h(r){X.call(this,n,r)}function p(r){X.call(this,n,r)}return n.$$set=r=>{"label"in 
r&&l(2,i=r.label),"info"in r&&l(3,o=r.info),"elem_id"in r&&l(4,d=r.elem_id),"elem_classes"in r&&l(5,t=r.elem_classes),"visible"in r&&l(6,u=r.visible),"value"in r&&l(0,s=r.value),"value_is_output"in r&&l(1,f=r.value_is_output),"multiselect"in r&&l(7,a=r.multiselect),"max_choices"in r&&l(8,m=r.max_choices),"choices"in r&&l(9,_=r.choices),"show_label"in r&&l(10,w=r.show_label),"container"in r&&l(11,A=r.container),"scale"in r&&l(12,b=r.scale),"min_width"in r&&l(13,B=r.min_width),"loading_status"in r&&l(14,v=r.loading_status),"allow_custom_value"in r&&l(15,k=r.allow_custom_value),"mode"in r&&l(16,S=r.mode)},[s,f,i,o,d,t,u,a,m,_,w,A,b,B,v,k,S,y,D,j,J,h,p]}class Qe extends Q{constructor(e){super(),Z(this,e,Pe,Ge,W,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,value_is_output:1,multiselect:7,max_choices:8,choices:9,show_label:10,container:11,scale:12,min_width:13,loading_status:14,allow_custom_value:15,mode:16})}}const $e=Qe,el=["static","dynamic"],ll=n=>({type:{payload:"string"},description:{payload:"selected choice"},example_data:n.choices.length?n.choices[0]:""});export{$e as Component,ll as document,el as modes};
- //# sourceMappingURL=index-ecdf43f2.js.map