parquet-converter committed on
Commit f81d814 · 1 Parent(s): 97eca09

Update parquet files (step 87 of 397)

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/README.md +0 -38
  2. spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/__ini__.py +0 -108
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Visual Studio 2015 for Windows and Become a Master of Multiple Platforms.md +0 -32
  4. spaces/1gistliPinn/ChatGPT4/Examples/Borderlands 2 Crack Only-SKIDROW Cheat Engine LINK.md +0 -6
  5. spaces/1gistliPinn/ChatGPT4/Examples/Cricket Batting Tips In Tamil Pdf 26 __EXCLUSIVE__.md +0 -6
  6. spaces/1phancelerku/anime-remove-background/Como baixar Solar Smash APK e jogar o simulador de destruio do planeta mais divertido.md +0 -148
  7. spaces/801artistry/RVC801/utils/backups_test.py +0 -138
  8. spaces/AFRAC/NCM_DEMO/app.py +0 -55
  9. spaces/AIFILMS/StyleGANEX/utils/common.py +0 -87
  10. spaces/AIatUIUC/CodeLATS/app.py +0 -109
  11. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/voc/yolov5_x-v61_fast_1xb32-50e_voc.py +0 -26
  12. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest269.py +0 -25
  13. spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/HuggingChat.py +0 -74
  14. spaces/Adapter/CoAdapter/ldm/data/utils.py +0 -60
  15. spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/code_test.py +0 -111
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/clickoutside/ClickOutside.js +0 -2
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/SetItems.js +0 -16
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/modal/Modal.js +0 -2
  19. spaces/AhmedM20/Email_Marketing_Content_Generator/app.py +0 -68
  20. spaces/AkitoP/umamusume_bert_vits2/text/japanese.py +0 -704
  21. spaces/Akmyradov/TurkmenTTSweSTT/uroman/README.md +0 -165
  22. spaces/AlanMars/QYL-AI-Space/modules/models/base_model.py +0 -592
  23. spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/test_eval_wer.py +0 -0
  24. spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py +0 -38
  25. spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/resnext.py +0 -153
  26. spaces/AndySAnker/DeepStruc/app.py +0 -94
  27. spaces/Aniquel/WizApp_Code_Generator/app.py +0 -31
  28. spaces/Ariharasudhan/YoloV5/utils/segment/general.py +0 -137
  29. spaces/Artples/llama-2-7b-chat/README.md +0 -14
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/x_user_defined.py +0 -325
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py +0 -0
  32. spaces/Autodog/nova/Dockerfile +0 -9
  33. spaces/Banbri/zcvzcv/src/lib/loadImage.ts +0 -14
  34. spaces/Benson/text-generation/Examples/Apklz.md +0 -70
  35. spaces/Benson/text-generation/Examples/Cazador Asesino Mod Apk Ilimitado Todo.md +0 -81
  36. spaces/BetterAPI/BetterChat_new/src/lib/types/SharedConversation.ts +0 -11
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/cache.py +0 -222
  38. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_boxes.py +0 -176
  39. spaces/ChevyWithAI/rvc-aicover/config.py +0 -88
  40. spaces/CikeyQI/meme-api/meme_generator/memes/maimai_join/__init__.py +0 -22
  41. spaces/CjangCjengh/Sanskrit-TTS/attentions.py +0 -300
  42. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/fpn.py +0 -98
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-9af10d66.js +0 -2
  44. spaces/Detomo/Object_detection/app.py +0 -58
  45. spaces/Detomo/ai-comic-generation/src/lib/generateSeed.ts +0 -3
  46. spaces/DragGan/DragGan-Inversion/PTI/criteria/localitly_regulizer.py +0 -65
  47. spaces/DragGan/DragGan/stylegan_human/training/networks_stylegan2.py +0 -824
  48. spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_ade20k_full.py +0 -964
  49. spaces/EPFL-VILAB/MultiMAE/utils/random_erasing.py +0 -103
  50. spaces/Eddycrack864/Applio-Inference/tools/infer_cli.py +0 -67
spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/README.md DELETED
@@ -1,38 +0,0 @@
- ### Example: `you` (use like openai pypi package) <a name="example-you"></a>
-
- ```python
-
- from gpt4free import you
-
- # simple request with links and details
- response = you.Completion.create(
-     prompt="hello world",
-     detailed=True,
-     include_links=True, )
-
- print(response.dict())
-
- # {
- #     "response": "...",
- #     "links": [...],
- #     "extra": {...},
- #     "slots": {...}
- # }
- # }
-
- # chatbot
-
- chat = []
-
- while True:
-     prompt = input("You: ")
-     if prompt == 'q':
-         break
-     response = you.Completion.create(
-         prompt=prompt,
-         chat=chat)
-
-     print("Bot:", response.text)
-
-     chat.append({"question": prompt, "answer": response.text})
- ```
 
spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/__ini__.py DELETED
@@ -1,108 +0,0 @@
- # Import necessary libraries
- import asyncio
- from json import dumps, loads
- from ssl import create_default_context
-
- import websockets
- from browser_cookie3 import edge
- from certifi import where
- from requests import get
-
- # Set up SSL context
- ssl_context = create_default_context()
- ssl_context.load_verify_locations(where())
-
-
- def format(msg: dict) -> str:
-     """Format message as JSON string with delimiter."""
-     return dumps(msg) + '\x1e'
-
-
- def get_token():
-     """Retrieve token from browser cookies."""
-     cookies = {c.name: c.value for c in edge(domain_name='bing.com')}
-     return cookies['_U']
-
-
- class AsyncCompletion:
-     async def create(
-             prompt: str = 'hello world',
-             optionSets: list = [
-                 'deepleo',
-                 'enable_debug_commands',
-                 'disable_emoji_spoken_text',
-                 'enablemm',
-                 'h3relaxedimg'
-             ],
-             token: str = get_token()):
-         """Create a connection to Bing AI and send the prompt."""
-
-         # Send create request
-         create = get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
-             headers={
-                 'host': 'edgeservices.bing.com',
-                 'authority': 'edgeservices.bing.com',
-                 'cookie': f'_U={token}',
-                 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
-             }
-         )
-
-         # Extract conversation data
-         conversationId = create.json()['conversationId']
-         clientId = create.json()['clientId']
-         conversationSignature = create.json()['conversationSignature']
-
-         # Connect to WebSocket
-         wss = await websockets.connect('wss://sydney.bing.com/sydney/ChatHub', max_size=None, ssl=ssl_context,
-             extra_headers={
-                 # Add necessary headers
-             }
-         )
-
-         # Send JSON protocol version
-         await wss.send(format({'protocol': 'json', 'version': 1}))
-         await wss.recv()
-
-         # Define message structure
-         struct = {
-             # Add necessary message structure
-         }
-
-         # Send message
-         await wss.send(format(struct))
-
-         # Process responses
-         base_string = ''
-         final = False
-         while not final:
-             objects = str(await wss.recv()).split('\x1e')
-             for obj in objects:
-                 if obj is None or obj == '':
-                     continue
-
-                 response = loads(obj)
-                 if response.get('type') == 1 and response['arguments'][0].get('messages', ):
-                     response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
-                         'text')
-
-                     yield (response_text.replace(base_string, ''))
-                     base_string = response_text
-
-                 elif response.get('type') == 2:
-                     final = True
-
-         await wss.close()
-
-
- async def run():
-     """Run the async completion and print the result."""
-     async for value in AsyncCompletion.create(
-             prompt='summarize cinderella with each word beginning with a consecutive letter of the alphabet, a-z',
-             optionSets=[
-                 "galileo",
-             ]
-     ):
-         print(value, end='', flush=True)
-
-
- asyncio.run(run())
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Visual Studio 2015 for Windows and Become a Master of Multiple Platforms.md DELETED
@@ -1,32 +0,0 @@
-
- <h1>How to Download Visual Studio 2015 for Windows and What's New in It</h1>
- <p>Visual Studio 2015 is a powerful and versatile integrated development environment (IDE) that allows you to create applications for various platforms, such as Windows, web, mobile, cloud, and more. It supports multiple programming languages, such as C#, C++, Visual Basic, Python, JavaScript, and more. It also offers many features and tools to help you code faster, debug easier, test better, and collaborate more efficiently.</p>
- <h2>download visual studio 2015 for windows</h2><br /><p><b><b>Download Zip</b> - <a href="https://byltly.com/2uKwD3">https://byltly.com/2uKwD3</a></b></p><br /><br />
- <p>In this article, we will show you how to download Visual Studio 2015 for Windows and what's new in it.</p>
-
- <h2>How to Download Visual Studio 2015 for Windows</h2>
- <p>Downloading Visual Studio 2015 for Windows is very simple and fast. Just follow these steps:</p>
- <ol>
- <li>Go to the official website of Visual Studio 2015: <a href="https://visualstudio.microsoft.com/vs/older-downloads/">https://visualstudio.microsoft.com/vs/older-downloads/</a></li>
- <li>Scroll down to the "Visual Studio 2015" section and click on the "Download" button next to the edition you want. You can choose from Community, Professional, or Enterprise editions. The Community edition is free for individual developers, open-source projects, academic research, education, and small teams. The Professional and Enterprise editions require a subscription or a trial license.</li>
- <li>Save the setup file on your PC and run it.</li>
- <li>Follow the instructions to install Visual Studio 2015 on your PC. You can customize the installation by selecting the features and components you want.</li>
- <li>Launch Visual Studio 2015 and sign in with your Microsoft account if prompted.</li>
- </ol>
-
- <h2>What's New in Visual Studio 2015</h2>
- <p>Visual Studio 2015 introduces many new features and improvements that make it easier and more productive to develop applications for various platforms. Here are some of the highlights:</p>
- <ul>
- <li>Cross-platform development: You can use Visual Studio 2015 to create applications for Windows, iOS, Android, Linux, and Mac OS using a single code base. You can use C#, C++, or HTML/JavaScript as your programming language and leverage the power of Xamarin, Cordova, or Unity frameworks.</li>
- <li>New languages and tools: You can use Visual Studio 2015 to code in new languages such as Python, Node.js, Go, and R. You can also use new tools such as Visual Studio Code Editor, which is a lightweight and fast code editor that supports multiple languages and platforms.</li>
- <li>Improved debugging and diagnostics: You can use Visual Studio 2015 to debug your applications more easily and effectively. You can use new features such as IntelliTrace, which records the execution history of your code and lets you replay it step by step; PerfTips, which shows you the performance impact of each line of code; Diagnostic Tools window, which shows you various metrics and events related to your application's performance and health; and more.</li>
- <li>Enhanced testing and quality: You can use Visual Studio 2015 to test your applications more thoroughly and efficiently. You can use new features such as Live Unit Testing, which runs your unit tests automatically as you code and shows you the results in real time; CodeLens, which shows you useful information about your code such as references, changes, authors, tests, etc.; Code Analysis, which helps you detect and fix code issues such as errors, warnings, style violations, etc.; and more.</li>
- <li>Better collaboration and DevOps: You can use Visual Studio 2015 to collaborate more effectively with your team members and deliver your applications faster and more reliably. You can use new features such as Team Explorer, which integrates with various source control systems such as Git, TFS, SVN, etc.; Code Review, which lets you request and provide feedback on code changes; Pull Requests, which lets you merge code changes from different branches; Continuous Delivery Tools, which lets you automate your build-deploy-test cycle; and more.</li>
- </ul>
-
- <h2>Conclusion</h2>
- <p>Visual Studio 2015 is a great IDE that offers many features and tools to help you create amazing applications for various platforms. It supports multiple programming languages, cross-platform development, improved debugging and diagnostics, enhanced testing and quality, better collaboration and DevOps, and more.</p>
- <p></p>
- <p>If you want to download Visual Studio 2015 for Windows and try</p> ddb901b051<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Borderlands 2 Crack Only-SKIDROW Cheat Engine LINK.md DELETED
@@ -1,6 +0,0 @@
- <h2>Borderlands 2 Crack Only-SKIDROW Cheat Engine</h2><br /><p><b><b>Download Zip</b> &#9734; <a href="https://imgfil.com/2uxYlQ">https://imgfil.com/2uxYlQ</a></b></p><br /><br />
- <br />
- d5da3c52bf<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Cricket Batting Tips In Tamil Pdf 26 __EXCLUSIVE__.md DELETED
@@ -1,6 +0,0 @@
- <h2>Cricket Batting Tips In Tamil Pdf 26</h2><br /><p><b><b>Download</b> &#10040;&#10040;&#10040; <a href="https://imgfil.com/2uxYxN">https://imgfil.com/2uxYxN</a></b></p><br /><br />
-
- The West Indies Cricket Umpires' Association (WICUA) will be holding its 29 th ... Passover Lamb Preparation, Audi R10 2019 , Hatch Meaning In Tamil, DDCA ... dates & notification, application process, exam level‎, mode‎, preparation tips, ... 2019 - Download 2014 asa umpires test answers PDF Full Ebook online right ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
 
spaces/1phancelerku/anime-remove-background/Como baixar Solar Smash APK e jogar o simulador de destruio do planeta mais divertido.md DELETED
@@ -1,148 +0,0 @@
-
- <h1>Download Solar Smash APK: A Planetary Destruction Simulator</h1>
- <p>Have you ever imagined what it would be like to destroy an entire planet with a simple tap on your phone's screen? If you are fascinated by space and by the possibilities for exploration and destruction it offers, then you need to get to know Solar Smash, a simulation game that lets you use a variety of weapons and disasters to annihilate planets and solar systems. In this article, we will show you what Solar Smash is, how to download the game's APK on your Android device, what the game's main features are, and what tips and tricks will let you play Solar Smash with more fun and efficiency. Shall we get started?</p>
- <h2>What is Solar Smash?</h2>
- <h3>A simulation game that lets you destroy planets with a variety of weapons</h3>
- <p>Solar Smash is a simulation game developed by Paradyme Games that lets you use a variety of different weapons to destroy the planet of your choice. These weapons include nuclear missiles, lasers, asteroids, alien invasions, black holes, solar flares, and much more. You can combine the weapons with one another to create spectacular reactions and watch the planet break apart into pieces. You can also customize your weapons and your planets, changing their color, size, speed, gravity, and other aspects.</p>
- <h2>download solar smash apk</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://jinyurl.com/2uNKP1">https://jinyurl.com/2uNKP1</a></b></p><br /><br />
- <h3>A free and fun game for space lovers</h3>
- <p>Best of all, Solar Smash is a completely free game to play on your Android device. You don't need to pay anything to download the game's APK or to access its features. You also won't be bothered by invasive ads or in-app purchases. Solar Smash is an ideal game for anyone who loves space and wants to have fun with the simulation and destruction possibilities it offers. You can spend hours playing with the different weapons and planets, experimenting with the different interactions and outcomes.</p>
- <h3>A game with realistic graphics and NASA imagery</h3>
- <p>Another strong point of Solar Smash is its graphical quality. The game has realistic, detailed graphics that render the planets and weapons faithfully. You can see the damage the weapons cause on the planets, such as craters, cracks, fire, smoke, and even the complete destruction of the core. In addition, the game uses real NASA imagery to represent the planets and solar systems, which further increases the game's realism and immersion. You can see Earth, Mars, Jupiter, Saturn, and other planets as they really are, and also explore secret planets that the game hides.</p>
- <h2>How to download the Solar Smash APK?</h2>
- <h3>Step by step to download the game on your Android device</h3>
- <p>Downloading the Solar Smash APK on your Android device is very easy and fast. Just follow the steps below:</p>
- <ol>
- <li>Go to the official Solar Smash website or a trustworthy site that offers the game's APK for download.</li>
- <li>Click the download button and wait for the APK file to be downloaded to your device.</li>
- <li>Locate the APK file in your device's downloads folder and tap it to start the installation.</li>
- <li>If prompted, allow installation from unknown sources in your device's settings.</li>
- <li>Follow the on-screen instructions to finish installing the game.</li>
- <li>Done! Now you can open the game and start destroying planets.</li>
- </ol>
- <h3>Minimum requirements and recommendations to run the game</h3>
- <p>To run Solar Smash on your Android device, you need at least version 4.4 of the operating system. In addition, you need at least 100 MB of free space in your device's memory. The game does not require an internet connection to work, but staying connected is recommended so you receive the game's updates and news. The game also runs better on devices with good processing power and a good screen resolution, which makes the most of the game's graphics and effects.</p>
- <h3>Alternatives to download the game on your PC or Mac</h3>
- <p>If you prefer to play Solar Smash on your PC or Mac, you can also do that with the help of an Android emulator. An emulator is a program that simulates an Android device on your computer, allowing you to install and run apps and games that are exclusive to that system. There are several emulators available on the internet, but some of the most popular are BlueStacks, NoxPlayer, and MEmu. To download Solar Smash on your PC or Mac using an emulator, follow the steps below:</p>
- <ol>
- <li>Download and install an Android emulator on your PC or Mac.</li>
- <li>Open the emulator and sign in with your Google account.</li>
- <li>Go to the Google Play Store or a trustworthy site that offers the Solar Smash APK for download.</li>
- <li>Download and install Solar Smash in the emulator.</li>
- <li>Done! Now you can play Solar Smash on your PC or Mac.</li>
- </ol>
- <h2>What are Solar Smash's main features?</h2> <h3>Game modes: Planet Smash and System Smash</h3>
- <p>Solar Smash has two main game modes: Planet Smash and System Smash. In Planet Smash, you can pick a specific planet to destroy with whatever weapons you like. You can view the planet from different angles and zoom levels and track the damage caused by your actions. You can also change the planet's characteristics, such as its color, size, gravity, and atmosphere. In System Smash, you can pick an entire solar system to destroy with whatever weapons you like. You can watch the planets orbiting the sun and interact with them in various ways. You can also change the solar system's characteristics, such as the distance between planets, the orbital speed, and the sun's luminosity.</p>
- <p>how to download solar smash apk on android<br />
- download solar smash apk mod unlimited money<br />
- download solar smash apk updated 2023<br />
- download solar smash apk for pc<br />
- download solar smash apk for free<br />
- download solar smash apk hacked<br />
- download solar smash apk old version<br />
- download solar smash apk via mediafire<br />
- download solar smash apk without ads<br />
- download solar smash apk with everything unlocked<br />
- download solar smash apk: space strategy<br />
- download solar smash apk via mega<br />
- download solar smash apk with new weapons<br />
- download solar smash apk for ios<br />
- download solar smash apk offline<br />
- download solar smash apk with multiplayer<br />
- download solar smash apk with custom planets<br />
- download solar smash apk via aptoide<br />
- download solar smash apk with sandbox mode<br />
- download solar smash apk with missions<br />
- download solar smash apk with improved graphics<br />
- download solar smash apk with sound effects<br />
- download solar smash apk with time control<br />
- download solar smash apk with zoom<br />
- download solar smash apk with planet rotation<br />
- download solar smash apk with realistic physics<br />
- download solar smash apk with collision simulation<br />
- download solar smash apk with creative mode<br />
- download solar smash apk with destruction mode<br />
- download solar smash apk with survival mode<br />
- download solar smash apk with campaign mode<br />
- download solar smash apk with online mode<br />
- download solar smash apk with co-op mode<br />
- download solar smash apk with competitive mode<br />
- download solar smash apk with world ranking<br />
- download solar smash apk with achievements and trophies<br />
- download solar smash apk with joystick support<br />
- download solar smash apk with VR support<br />
- download solar smash apk with 4K support<br />
- download solar smash apk with 60 FPS support<br />
- download solar smash apk without viruses and malware<br />
- download solar smash apk without root or jailbreak<br />
- download solar smash apk without needing internet<br />
- download solar smash apk without needing registration<br />
- download solar smash apk without needing a license<br />
- download solar smash apk without needing updates<br />
- download solar smash apk without needing storage space<br />
- download solar smash apk without needing an emulator<br />
- download solar smash apk without needing a verification code</p>
- <h3>Weapons and disasters: missiles, lasers, asteroids, and more</h3>
- <p>Solar Smash has a wide variety of weapons and disasters you can use to destroy the planets and solar systems. These weapons and disasters include:</p>
- <ul>
- <li>Nuclear missiles: explosive projectiles that leave large craters and fires on the planets.</li>
- <li>Lasers: energy beams that pierce through the planets, causing cracks and smoke.</li>
- <li>Asteroids: space rocks that collide with the planets and cause devastating impacts.</li>
- <li>Alien invasions: extraterrestrial ships that attack the planets with lasers and bombs.</li>
- <li>Black holes: cosmic objects that suck the planets and solar systems into themselves.</li>
- <li>Solar flares: eruptions of plasma that burst from the sun and hit the planets with radiation and heat.</li>
- <li>And much more: earthquakes, volcanoes, tsunamis, hurricanes, tornadoes, meteors, comets, etc.</li>
- </ul>
- <h3>Planets and systems: Earth, Mars, Jupiter, and others</h3>
- <p>Solar Smash has several planets and solar systems you can choose to destroy. These planets and solar systems are based on real NASA imagery, which makes the game more realistic and interesting. You can see the planets as they really are, with their continents, oceans, clouds, rings, moons, etc. You can also see the solar systems as they really are, with their planets orbiting the sun at different speeds and distances. Some of the planets and solar systems you can choose are:</p>
- <ul>
- <li>Earth: our home planet, with its seven continents, five oceans, blue atmosphere, and its moon.</li>
- <li>Mars: the red planet, with its deserts, mountains, volcanoes, frozen poles, and its two moons.</li>
- <li>Jupiter: the largest planet in the solar system, with its colorful atmosphere, its Great Red Spot, its thin rings, and its 79 moons.</li>
- <li>Saturn: the second-largest planet in the solar system, with its yellowish atmosphere, bright rings, and its 82 moons.</li>
- <li>Uranus: the seventh planet of the solar system, with its blue-green atmosphere, tilted axis, dark rings, and its 27 moons.</li>
- <li>Neptune: the eighth planet of the solar system, with its bluish atmosphere, strong winds, thin rings, and its 14 moons.</li>
- <li>Solar System: our planetary system, with the sun and the eight major planets, plus dwarf planets, asteroids, comets, and other celestial bodies.</li>
- <li>And much more: Venus, Mercury, Pluto, Ceres, Haumea, Makemake, Eris, etc.</li>
- </ul>
- <h3>Secret planets: how to unlock them and what they are</h3>
- <p>Besides the well-known planets and solar systems, Solar Smash also has some secret planets that you can unlock and destroy. These secret planets are based on pop-culture references, such as movies, series, games, books, etc. To unlock the secret planets, you need to follow some tips and clues the game gives you. For example, to unlock the planet Tatooine, from the Star Wars saga, you need to use the green laser weapon on the planet Mars. To unlock the planet Namekusei, from the Dragon Ball Z series, you need to use the solar flare weapon on the planet Jupiter. Some of the secret planets you can unlock are:</p>
- <ul>
- <li>Tatooine: the desert planet with two suns, home of Luke Skywalker and Obi-Wan Kenobi.</li>
- <li>Namekusei: the green planet with three suns, home of Piccolo and Dende.</li>
- <li>Hogwarts: the school of magic and wizardry where Harry Potter and his friends study.</li>
- <li>Pandora: the lush planet with a giant moon, home of Jake Sully and Neytiri.</li>
- <li>Middle Earth: the fantasy world with several kingdoms and races, where the story of The Lord of the Rings takes place.</li>
- <li>And much more: Narnia, Westeros, Asgard, Cybertron, etc.</li>
- </ul>
- <h2>What are the tips and tricks for playing Solar Smash?</h2> <h3>How to complete the game's challenges and achievements</h3>
- <p>Solar Smash has a series of challenges and achievements you can complete to earn rewards and unlock new weapons and planets. These challenges and achievements are based on different goals, such as destroying a certain number of planets, using a certain weapon, causing a certain type of damage, etc. To see the available challenges and achievements, open the game's menu and tap the trophy icon. There you can see your progress and your rewards. Some examples of challenges and achievements are:</p>
- <ul>
- <li>Destroy 10 planets with nuclear missiles.</li>
- <li>Destroy the core of 5 planets with lasers.</li>
- <li>Cause a magnitude 10 earthquake on a planet.</li>
- <li>Create a black hole with an asteroid.</li>
- <li>Unlock all the secret planets.</li>
- <li>And much more: destroy the moon, cause an alien invasion, create a supernova, etc.</li>
- </ul>
- <h3>How to hit the right spot to destroy the planet's core</h3>
- <p>One of the most efficient and satisfying ways to destroy a planet in Solar Smash is to hit its core with a powerful weapon, such as a laser or a missile. This causes a huge explosion that shatters the planet into many fragments. However, hitting the planet's core is not as easy as it seems. You need good aim and good precision to hit the right spot. One tip that makes this task easier is to use the zoom to bring the planet's image closer and see its center better. Another tip is to use the X-ray weapon to see through the planet and locate its core. That way, you can aim with more confidence and hit the target more easily.</p>
- <h3>How to combine weapons and elements to create incredible reactions</h3>
- <p>Another way to have fun in Solar Smash is to combine the different weapons and elements to create incredible and unexpected reactions. You can use your creativity and curiosity to experiment with the many possibilities the game offers. For example, you can use the rain weapon to soak a planet and then use the lightning weapon to cause an electric shock. Or you can use the ice weapon to freeze a planet and then use the fire weapon to cause a thermal explosion. Or you can use the gravity weapon to pull several asteroids toward a planet and then use the black hole weapon to suck them all in. The combinations are endless and may surprise you with their results.</p>
- <h3>How to customize your weapons and your planets</h3>
- <p>Solar Smash also lets you customize your weapons and your planets, changing various aspects such as color, size, speed, gravity, etc. This can make the game more fun and more challenging, since you can create different scenarios and test your limits. To customize your weapons and planets, open the game's menu and tap the gear icon. There you can see the options available for each weapon and each planet and adjust them to your liking. For example, you can change the color of your laser, the size of your asteroid, the speed of your missile, the gravity of your planet, etc.</p>
- <h2>Conclusion</h2>
- <p>Solar Smash is a simulation game that lets you destroy planets and solar systems with a variety of weapons and disasters. The game is free, fun, realistic, and addictive. You can download the game's APK on your Android device or on your PC or Mac with an emulator. You can also choose among several well-known or secret planets and solar systems, and customize your weapons and your planets. You can also complete challenges and achievements, hit the planets' cores, combine weapons and elements, and create incredible reactions. Solar Smash is a game that will give you hours of fun and destruction. Download the Solar Smash APK now and begin your space adventure!</p>
- <h2>FAQs</h2>
- <h3>What is Solar Smash?</h3>
- <p>Solar Smash is a simulation game that lets you destroy planets and solar systems with a variety of weapons and disasters.</p>
- <h3>How to download the Solar Smash APK?</h3>
- <p>You can download the Solar Smash APK on your Android device or on your PC or Mac with an emulator. You need to go to a trustworthy site that offers the game's APK for download, download the file, and install it on your device.</p>
- <h3>What are Solar Smash's main features?</h3>
- <p>Solar Smash has two game modes: Planet Smash and System Smash. You can choose among several well-known or secret planets and solar systems, and customize your weapons and your planets. You can also complete challenges and achievements, hit the planets' cores, combine weapons and elements, and create incredible reactions.</p>
- <h3>What are the best weapons to destroy the planets?</h3>
- <p>That depends on your taste and your goal. Some of the most powerful and fun weapons are lasers, nuclear missiles, asteroids, black holes, and solar flares.</p>
- <h3>How to unlock the secret planets?</h3>
- <p>You need to follow some tips and clues the game gives you. For example, to unlock the planet Tatooine, from the Star Wars saga, you need to use the green laser weapon on the planet Mars. To unlock the planet Namekusei, from the Dragon Ball Z series, you need to use the solar flare weapon on the planet Jupiter.</p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/801artistry/RVC801/utils/backups_test.py DELETED
@@ -1,138 +0,0 @@
-
- import os
- import shutil
- import hashlib
- import time
-
- LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
- WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
- GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup'
-
- def import_google_drive_backup():
-     print("Importing Google Drive backup...")
-     GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' # change this to your Google Drive path
-     LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
-     WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
-     weights_exist = False
-     files_to_copy = []
-     weights_to_copy = []
-
-     def handle_files(root, files, is_weight_files=False):
-         for filename in files:
-             filepath = os.path.join(root, filename)
-             if filename.endswith('.pth') and is_weight_files:
-                 weights_exist = True
-                 backup_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
-             else:
-                 backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
-             backup_folderpath = os.path.dirname(backup_filepath)
-             if not os.path.exists(backup_folderpath):
-                 os.makedirs(backup_folderpath)
-                 print(f'Created folder: {backup_folderpath}', flush=True)
-             if is_weight_files:
-                 weights_to_copy.append((filepath, backup_filepath))
-             else:
-                 files_to_copy.append((filepath, backup_filepath))
-
-     for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'logs')):
-         handle_files(root, files)
-
-     for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'weights')):
-         handle_files(root, files, True)
-
-     # Copy files in batches
-     total_files = len(files_to_copy)
-     start_time = time.time()
-     for i, (source, dest) in enumerate(files_to_copy, start=1):
-         with open(source, 'rb') as src, open(dest, 'wb') as dst:
-             shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size
-         # Report progress every 5 seconds or after every 100 files, whichever is less frequent
-         if time.time() - start_time > 5 or i % 100 == 0:
-             print(f'\rCopying file {i} of {total_files} ({i * 100 / total_files:.2f}%)', end="")
-             start_time = time.time()
-     print(f'\nImported {len(files_to_copy)} files from Google Drive backup')
-
-     # Copy weights in batches
-     total_weights = len(weights_to_copy)
-     start_time = time.time()
-     for i, (source, dest) in enumerate(weights_to_copy, start=1):
-         with open(source, 'rb') as src, open(dest, 'wb') as dst:
-             shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size
-         # Report progress every 5 seconds or after every 100 files, whichever is less frequent
-         if time.time() - start_time > 5 or i % 100 == 0:
-             print(f'\rCopying weight file {i} of {total_weights} ({i * 100 / total_weights:.2f}%)', end="")
-             start_time = time.time()
-     if weights_exist:
-         print(f'\nImported {len(weights_to_copy)} weight files')
-         print("Copied weights from Google Drive backup to local weights folder.")
-     else:
-         print("\nNo weights found in Google Drive backup.")
-     print("Google Drive backup import completed.")
-
- def backup_files():
-     print("\n Starting backup loop...")
-     last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt')
-     fully_updated = False # boolean to track if all files are up to date
-     try:
-         with open(last_backup_timestamps_path, 'r') as f:
-             last_backup_timestamps = dict(line.strip().split(':') for line in f)
-     except:
-         last_backup_timestamps = {}
-
-     while True:
-         updated = False
-         files_to_copy = []
-         files_to_delete = []
-
-         for root, dirs, files in os.walk(LOGS_FOLDER):
-             for filename in files:
-                 if filename != 'last_backup_timestamps.txt':
-                     filepath = os.path.join(root, filename)
-                     if os.path.isfile(filepath):
-                         backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
-                         backup_folderpath = os.path.dirname(backup_filepath)
-
-                         if not os.path.exists(backup_folderpath):
-                             os.makedirs(backup_folderpath)
-                             print(f'Created backup folder: {backup_folderpath}', flush=True)
-
-                         # check if file has changed since last backup
-                         last_backup_timestamp = last_backup_timestamps.get(filepath)
-                         current_timestamp = os.path.getmtime(filepath)
-                         if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp:
-                             files_to_copy.append((filepath, backup_filepath)) # add to list of files to copy
-                             last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp
-                             updated = True
-                             fully_updated = False # if a file is updated, all files are not up to date
-
-         # check if any files were deleted in Colab and delete them from the backup drive
-         for filepath in list(last_backup_timestamps.keys()):
-             if not os.path.exists(filepath):
-                 backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
-                 if os.path.exists(backup_filepath):
-                     files_to_delete.append(backup_filepath) # add to list of files to delete
-                 del last_backup_timestamps[filepath]
-                 updated = True
-                 fully_updated = False # if a file is deleted, all files are not up to date
-
-         # Copy files in batches
-         if files_to_copy:
-             for source, dest in files_to_copy:
-                 shutil.copy2(source, dest)
-             print(f'Copied or updated {len(files_to_copy)} files')
-
-         # Delete files in batches
-         if files_to_delete:
-             for file in files_to_delete:
-                 os.remove(file)
-             print(f'Deleted {len(files_to_delete)} files')
-
-         if not updated and not fully_updated:
-             print("Files are up to date.")
-             fully_updated = True # if all files are up to date, set the boolean to True
-             copy_weights_folder_to_drive()
-
-         with open(last_backup_timestamps_path, 'w') as f:
-             for filepath, timestamp in last_backup_timestamps.items():
-                 f.write(f'{filepath}:{timestamp}\n')
-         time.sleep(15) # wait for 15 seconds before checking again
 
spaces/AFRAC/NCM_DEMO/app.py DELETED
@@ -1,55 +0,0 @@
- #!/usr/bin/env python
- # coding: utf-8
-
- import subprocess
-
- # Define the command you want to run
- command = 'pip install --no-cache-dir gradio==3.26.0'
-
- # Use subprocess to run the command
- try:
-     subprocess.check_call(command, shell=True)
-     print("Installation successful!")
- except subprocess.CalledProcessError as e:
-     print(f"Installation failed with error: {e}")
-
- import pandas as pd
- import numpy as np
- import tensorflow as tf
- from tensorflow.keras.preprocessing.text import Tokenizer
- from sklearn.preprocessing import OneHotEncoder
- import gradio as gr
- from gradio import components
-
- print("\n\n\n****************************>>>>>>>>> GRADIO VERSION: ",gr.__version__,"\n\n\n")
-
- model = tf.keras.models.load_model("NCM_DEMO.H5", compile=False)
-
- ncm_table = pd.read_csv("https://raw.githubusercontent.com/mfilipak/AFRAC_IA/main/DATASET/TABELA_NCM.CSV", index_col="CODIGO")
- valid_ncms = sorted(ncm_table[ncm_table.index > 1000000].index)
- ncmst = np.array(valid_ncms)
- ncmst = ncmst.reshape([-1,1])
- ohe = OneHotEncoder()
- ohe.fit(ncmst)
-
- tk = Tokenizer(num_words=None, char_level=True, oov_token='UNK')
- tk.word_index = {'UNK': 1, ' ': 2, 'a': 3, 'o': 4, 'e': 5, 'r': 6, 'i': 7, 'c': 8, 'l': 9, 's': 10, 't': 11, 'n': 12, 'm': 13, '0': 14, 'p': 15, 'g': 16, 'd': 17, 'u': 18, 'b': 19, '1': 20, 'f': 21, 'h': 22, '2': 23, '5': 24, 'v': 25, '3': 26, 'k': 27, '4': 28, '.': 29, 'x': 30, '6': 31, '8': 32, '-': 33, '7': 34, '9': 35, 'j': 36, 'z': 37, '/': 38, 'y': 39, 'q': 40, 'w': 41, ',': 42, ':': 43, '(': 44, ')': 45, '_': 46, '#': 47, '+': 48, '*': 49, '%': 50, '"': 51, "'": 52, 'ç': 53, '&': 54, 'ã': 55, ';': 56, ']': 57, '[': 58, '$': 59, 'á': 60, '\\': 61, '|': 62, 'é': 63, 'º': 64, 'ó': 65, '!': 66, '=': 67, 'í': 68, 'ê': 69, '?': 70, '>': 71, '@': 72, '¿': 73, '°': 74, 'ú': 75, '\xa0': 76, 'ô': 77, 'â': 78, '`': 79, 'à': 80, 'õ': 81, 'ï': 82, 'ª': 83, '²': 84, '{': 85, '<': 86, '~': 87, 'è': 88, '§': 89, 'ø': 90, 'ñ': 91, '³': 92, 'û': 93, 'ù': 94, '\xad': 95, '}': 96, '\x81': 97, 'ä': 98, 'ü': 99, '¶': 100, '^': 101, '€': 102, '¹': 103, 'µ': 104, '®': 105, '¡': 106}
-
- def PredictNCM(txt):
-     x = [txt[:120].lower() ]
-
-     print(txt)
-
-     X = np.array(tk.texts_to_sequences([_+(120-len(_))*" " for _ in x]))
-     pred = model.predict(X, verbose=0)[0]
-     aux = np.argsort(pred)[::-1][:5]
-     return {f"{int(valid_ncms[i]):08}":float(pred[i]) for i in aux}, ncm_table.loc[valid_ncms[aux[0]],"DESCRICAO"]
-
-
- demo = gr.Interface(fn=PredictNCM, outputs=[components.Label(label="NCMs"), components.Textbox(label="Descrição do NCM")], title='AFRAC NOTA CERTA',
-                     inputs=components.Textbox(label="DESCRIÇÃO"),
-                     examples=["Coca-Cola PET 2l","Pepsi 500ml", "Guaraná Antarctica 2l", "Ração Bocão Premium","Mentos Kiss Morango", "Bombom Sonho de Valsa"])
- demo.launch()
- #display(demo.launch(share=True))
- #demo.close()
-
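
For reference, `PredictNCM` can also be exercised directly, outside the Gradio interface. A minimal sketch using one of the example strings wired into the demo above; it assumes the model and the NCM table loaded successfully:

```python
# Hypothetical direct call; the Gradio UI invokes PredictNCM the same way.
scores, description = PredictNCM("Coca-Cola PET 2l")
print(scores)       # top-5 candidate NCM codes mapped to predicted probabilities
print(description)  # catalog description of the best-scoring NCM code
```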
 
spaces/AIFILMS/StyleGANEX/utils/common.py DELETED
@@ -1,87 +0,0 @@
- import cv2
- import numpy as np
- from PIL import Image
- import matplotlib.pyplot as plt
-
-
- # Log images
- def log_input_image(x, opts):
-     if opts.label_nc == 0:
-         return tensor2im(x)
-     elif opts.label_nc == 1:
-         return tensor2sketch(x)
-     else:
-         return tensor2map(x)
-
-
- def tensor2im(var):
-     var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
-     var = ((var + 1) / 2)
-     var[var < 0] = 0
-     var[var > 1] = 1
-     var = var * 255
-     return Image.fromarray(var.astype('uint8'))
-
-
- def tensor2map(var):
-     mask = np.argmax(var.data.cpu().numpy(), axis=0)
-     colors = get_colors()
-     mask_image = np.ones(shape=(mask.shape[0], mask.shape[1], 3))
-     for class_idx in np.unique(mask):
-         mask_image[mask == class_idx] = colors[class_idx]
-     mask_image = mask_image.astype('uint8')
-     return Image.fromarray(mask_image)
-
-
- def tensor2sketch(var):
-     im = var[0].cpu().detach().numpy()
-     im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
-     im = (im * 255).astype(np.uint8)
-     return Image.fromarray(im)
-
-
- # Visualization utils
- def get_colors():
-     # currently support up to 19 classes (for the celebs-hq-mask dataset)
-     colors = [[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255],
-               [255, 204, 204], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204],
-               [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]]
-     return colors
-
-
- def vis_faces(log_hooks):
-     display_count = len(log_hooks)
-     fig = plt.figure(figsize=(8, 4 * display_count))
-     gs = fig.add_gridspec(display_count, 3)
-     for i in range(display_count):
-         hooks_dict = log_hooks[i]
-         fig.add_subplot(gs[i, 0])
-         if 'diff_input' in hooks_dict:
-             vis_faces_with_id(hooks_dict, fig, gs, i)
-         else:
-             vis_faces_no_id(hooks_dict, fig, gs, i)
-     plt.tight_layout()
-     return fig
-
-
- def vis_faces_with_id(hooks_dict, fig, gs, i):
-     plt.imshow(hooks_dict['input_face'])
-     plt.title('Input\nOut Sim={:.2f}'.format(float(hooks_dict['diff_input'])))
-     fig.add_subplot(gs[i, 1])
-     plt.imshow(hooks_dict['target_face'])
-     plt.title('Target\nIn={:.2f}, Out={:.2f}'.format(float(hooks_dict['diff_views']),
-                                                      float(hooks_dict['diff_target'])))
-     fig.add_subplot(gs[i, 2])
-     plt.imshow(hooks_dict['output_face'])
-     plt.title('Output\n Target Sim={:.2f}'.format(float(hooks_dict['diff_target'])))
-
-
- def vis_faces_no_id(hooks_dict, fig, gs, i):
-     plt.imshow(hooks_dict['input_face'], cmap="gray")
-     plt.title('Input')
-     fig.add_subplot(gs[i, 1])
-     plt.imshow(hooks_dict['target_face'])
-     plt.title('Target')
-     fig.add_subplot(gs[i, 2])
-     plt.imshow(hooks_dict['output_face'])
-     plt.title('Output')
 
spaces/AIatUIUC/CodeLATS/app.py DELETED
@@ -1,109 +0,0 @@
- import streamlit as st
- import openai
- import os
- import sys
- import argparse
- sys.path.append('./lats')
- from lats_main import lats_main
-
- st.set_page_config(layout="wide")
-
- # Initialize session state variables if they don't exist.
- if 'response_content' not in st.session_state:
-     st.session_state.response_content = None
-
- # Creating main columns for the chat and runtime notifications
- chat_col = st.container()
-
- chat_col.title("CodeLATS")
- description = """This tech demo is an implementation of Language Agent Tree Search (LATS) (https://arxiv.org/abs/2310.04406) built specifically for generating code in the form of python functions. It achieves :green[**state-of-the-art**] results on HumanEval with a :green[**94.4% pass@1 rate**] on GPT-4.
-
- Listed below is an example programming problem (https://leetcode.com/problems/longest-valid-parentheses/description/) to get started with.
-
- ```python
- Given a string containing just the characters '(' and ')', return the length of the longest valid (well-formed) parentheses substring
- ```
- :red[**NOTE:**] On average a call for a HumanEval or Leetcode question will cost around 5-30 cents on GPT-4, using the default parameters. This value may change depending on problem difficulty and parameters.
- """
-
- chat_col.markdown(description)
- sidebar = st.sidebar
- # Runtime Section
- runtime_container = st.container()
-
- # Parameters Section
- sidebar.title("**An AI@UIUC Project** (https://uiuc.ai/)")
- parameters_section = sidebar.expander("Parameters", expanded=False)
- tree_width = parameters_section.number_input("Tree Width", min_value=1, max_value=5, value=1)
- tree_depth = parameters_section.number_input("Tree Depth", min_value=1, max_value=8, value=3)
- iterations = parameters_section.number_input("Iterations", min_value=1, max_value=4, value=2)
- key = st.sidebar.text_input("Enter your OpenAI Api Key:", type="password")
- sidebar.markdown('<hr style="margin-top: 0.5rem; margin-bottom: 0.5rem;">', unsafe_allow_html=True)
-
- with sidebar:
-     runtime_container = st.container()
-     runtime_container.empty()
-
- runtime_messages = []
-
- def make_args(instruction, tree_depth, tree_width, iterations):
-     parser = argparse.ArgumentParser()
-
-     parser.add_argument("--strategy", default="mcts", help="Strategy to use")
-     parser.add_argument("--language", default="py", help="Programming language")
-     parser.add_argument("--model", default="gpt-4", help="Model type")
-     parser.add_argument("--max_iters", default=iterations, help="Maximum iterations")
-     parser.add_argument("--instruction", default=instruction, help="Instruction text")
-     parser.add_argument("--verbose", action="store_true", help="Verbose output")
-     parser.add_argument("--is_leetcode", action='store_true',
-                         help="To run the leetcode benchmark")  # Temporary
-     parser.add_argument("--n_samples", type=int,
-                         help="The number of nodes added during expansion", default=tree_width)
-     parser.add_argument("--depth", type=int,
-                         help="Tree depth", default=tree_depth)
-     args = parser.parse_args()
-     return args
-
- def run_querry():
-     if user_input:
-
-         # Create a new container for each subsequent message
-         runtime_container.write("Initiating process...")
-
-         # Make it so that prints go to runtime_container writes instead
-         old_stdout = sys.stdout
-         sys.stdout = runtime_container
-
-         with chat_col:
-
-             with st.spinner('Running...'):
-                 args = make_args(user_input, tree_depth, tree_width, iterations)
-                 # main call
-                 response = lats_main(args)
-
-         sys.stdout = old_stdout
-         runtime_container.write("Response fetched.")
-         chat_col.markdown('<hr style="margin-top: 0.5rem; margin-bottom: 0.5rem;">', unsafe_allow_html=True)
-         chat_col.write(f"```python\n{response} \n")
-
-         return response
-
- # User input section at the bottom of the page
- with chat_col:
-     user_input = st.text_area("Enter your message here:", placeholder="Type your message here...", label_visibility="collapsed")
-     button = st.button("Send")
-
- if button:
-     fail = False
-     if key == "":
-         st.warning("Missing OpenAI API Key")
-         fail = True
-
-     if user_input == "":
-         st.warning("Missing a coding problem")
-         fail = True
-
-     if (not fail):
-         openai.api_key = key
-         run_querry()
-
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/voc/yolov5_x-v61_fast_1xb32-50e_voc.py DELETED
@@ -1,26 +0,0 @@
- _base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py'
-
- deepen_factor = 1.33
- widen_factor = 1.25
- train_batch_size_per_gpu = 32
- train_num_workers = 8
-
- # TODO: need to add pretrained_model
- load_from = None
-
- model = dict(
-     backbone=dict(
-         deepen_factor=deepen_factor,
-         widen_factor=widen_factor,
-     ),
-     neck=dict(
-         deepen_factor=deepen_factor,
-         widen_factor=widen_factor,
-     ),
-     bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
-
- train_dataloader = dict(
-     batch_size=train_batch_size_per_gpu, num_workers=train_num_workers)
-
- optim_wrapper = dict(
-     optimizer=dict(batch_size_per_gpu=train_batch_size_per_gpu))
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest269.py DELETED
@@ -1,25 +0,0 @@
- # model settings
- model = dict(
-     type='ImageClassifier',
-     backbone=dict(
-         type='ResNeSt',
-         depth=269,
-         num_stages=4,
-         stem_channels=128,
-         out_indices=(3, ),
-         style='pytorch'),
-     neck=dict(type='GlobalAveragePooling'),
-     head=dict(
-         type='LinearClsHead',
-         num_classes=1000,
-         in_channels=2048,
-         loss=dict(
-             type='LabelSmoothLoss',
-             label_smooth_val=0.1,
-             num_classes=1000,
-             reduction='mean',
-             loss_weight=1.0),
-         topk=(1, 5),
-         cal_acc=False),
-     train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)),
- )
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/HuggingChat.py DELETED
@@ -1,74 +0,0 @@
- from __future__ import annotations
-
- import json, uuid
-
- from aiohttp import ClientSession
-
- from ...typing import AsyncGenerator
- from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
-
-
- class HuggingChat(AsyncGeneratorProvider):
-     url = "https://huggingface.co/chat"
-     needs_auth = True
-     working = True
-     model = "meta-llama/Llama-2-70b-chat-hf"
-
-     @classmethod
-     async def create_async_generator(
-         cls,
-         model: str,
-         messages: list[dict[str, str]],
-         stream: bool = True,
-         proxy: str = None,
-         cookies: dict = None,
-         **kwargs
-     ) -> AsyncGenerator:
-         model = model if model else cls.model
-         if proxy and "://" not in proxy:
-             proxy = f"http://{proxy}"
-         if not cookies:
-             cookies = get_cookies(".huggingface.co")
-
-         headers = {
-             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
-         }
-         async with ClientSession(
-             cookies=cookies,
-             headers=headers
-         ) as session:
-             async with session.post(f"{cls.url}/conversation", json={"model": model}, proxy=proxy) as response:
-                 conversation_id = (await response.json())["conversationId"]
-
-             send = {
-                 "id": str(uuid.uuid4()),
-                 "inputs": format_prompt(messages),
-                 "is_retry": False,
-                 "response_id": str(uuid.uuid4()),
-                 "web_search": False
-             }
-             async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
-                 async for line in response.content:
-                     line = json.loads(line[:-1])
-                     if "type" not in line:
-                         raise RuntimeError(f"Response: {line}")
-                     elif line["type"] == "stream":
-                         yield line["token"]
-                     elif line["type"] == "finalAnswer":
-                         break
-
-             async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
-                 response.raise_for_status()
-
-
-     @classmethod
-     @property
-     def params(cls):
-         params = [
-             ("model", "str"),
-             ("messages", "list[dict[str, str]]"),
-             ("stream", "bool"),
-             ("proxy", "str"),
-         ]
-         param = ", ".join([": ".join(p) for p in params])
-         return f"g4f.provider.{cls.__name__} supports: ({param})"
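
A sketch of how this provider could be driven on its own, based only on the `create_async_generator` signature above. The message format mirrors the `list[dict[str, str]]` annotation, and the availability of valid `.huggingface.co` cookies (picked up by `get_cookies`) is an assumption:

```python
import asyncio

async def main():
    # Hypothetical invocation; requires a logged-in huggingface.co browser
    # session so get_cookies(".huggingface.co") can find auth cookies.
    async for token in HuggingChat.create_async_generator(
        model="meta-llama/Llama-2-70b-chat-hf",
        messages=[{"role": "user", "content": "Hello!"}],
    ):
        print(token, end="", flush=True)

asyncio.run(main())
```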
 
spaces/Adapter/CoAdapter/ldm/data/utils.py DELETED
@@ -1,60 +0,0 @@
- # -*- coding: utf-8 -*-
-
- import cv2
- import numpy as np
- from torchvision.transforms import transforms
- from torchvision.transforms.functional import to_tensor
- from transformers import CLIPProcessor
-
- from basicsr.utils import img2tensor
-
-
- class AddCannyFreezeThreshold(object):
-
-     def __init__(self, low_threshold=100, high_threshold=200):
-         self.low_threshold = low_threshold
-         self.high_threshold = high_threshold
-
-     def __call__(self, sample):
-         # sample['jpg'] is PIL image
-         x = sample['jpg']
-         img = cv2.cvtColor(np.array(x), cv2.COLOR_RGB2BGR)
-         canny = cv2.Canny(img, self.low_threshold, self.high_threshold)[..., None]
-         sample['canny'] = img2tensor(canny, bgr2rgb=True, float32=True) / 255.
-         sample['jpg'] = to_tensor(x)
-         return sample
-
-
- class AddCannyRandomThreshold(object):
-
-     def __init__(self, low_threshold=100, high_threshold=200, shift_range=50):
-         self.low_threshold = low_threshold
-         self.high_threshold = high_threshold
-         self.threshold_prng = np.random.RandomState()
-         self.shift_range = shift_range
-
-     def __call__(self, sample):
-         # sample['jpg'] is PIL image
-         x = sample['jpg']
-         img = cv2.cvtColor(np.array(x), cv2.COLOR_RGB2BGR)
-         low_threshold = self.low_threshold + self.threshold_prng.randint(-self.shift_range, self.shift_range)
-         high_threshold = self.high_threshold + self.threshold_prng.randint(-self.shift_range, self.shift_range)
-         canny = cv2.Canny(img, low_threshold, high_threshold)[..., None]
-         sample['canny'] = img2tensor(canny, bgr2rgb=True, float32=True) / 255.
-         sample['jpg'] = to_tensor(x)
-         return sample
-
-
- class AddStyle(object):
-
-     def __init__(self, version):
-         self.processor = CLIPProcessor.from_pretrained(version)
-         self.pil_to_tensor = transforms.ToTensor()
-
-     def __call__(self, sample):
-         # sample['jpg'] is PIL image
-         x = sample['jpg']
-         style = self.processor(images=x, return_tensors="pt")['pixel_values'][0]
-         sample['style'] = style
-         sample['jpg'] = to_tensor(x)
-         return sample
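A usage sketch for these transforms, with a placeholder image path. Note that each transform converts `sample['jpg']` from a PIL image into a tensor on the way out, so a second PIL-expecting transform must run on a fresh sample, not be chained after the first:

```python
from PIL import Image

# 'jpg' holds a PIL image on input, as the comments in the transforms expect
sample = {"jpg": Image.open("example.jpg").convert("RGB")}  # example.jpg is a placeholder
sample = AddCannyRandomThreshold()(sample)
# 'jpg' is now a CHW float tensor; 'canny' is a 1xHxW edge map scaled to [0, 1]
print(sample["jpg"].shape, sample["canny"].shape)
```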
 
spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/code_test.py DELETED
@@ -1,111 +0,0 @@
- from __future__ import annotations
-
- import os
- import subprocess
- import multiprocessing
- from typing import TYPE_CHECKING, Any, List, Tuple
-
- from agentverse.logging import get_logger
- from agentverse.agents import ExecutorAgent
- from agentverse.message import ExecutorMessage, SolverMessage
- from agentverse.logging import logger
-
- from . import BaseExecutor, executor_registry
-
-
- def execute_command(command: str, result_list) -> None:
-     # TODO: make it more secure
-     result = subprocess.run(command, capture_output=True, shell=True, encoding="utf-8")
-     result_list.append(f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}")
-     # return f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
-
-
- @executor_registry.register("code-test")
- class CodeTestExecutor(BaseExecutor):
-     has_test: dict = {}
-     timeout: int = 10
-
-     async def astep(
-         self,
-         agent: ExecutorAgent,
-         task_description: str,
-         solution: List[SolverMessage],
-         *args,
-         **kwargs,
-     ) -> Any:
-         solution = solution[0].content
-         os.makedirs("tmp", exist_ok=True)
-         self.write_to_file("tmp/main.py", solution)
-         manager = multiprocessing.Manager()
-         result = manager.list()
-         if task_description not in self.has_test:
-             response = (await agent.astep(task_description, solution)).content
-             self.write_to_file(response["file_path"], response["code"])
-             self.has_test[task_description] = f"python {response['file_path']}"
-             p = multiprocessing.Process(
-                 target=execute_command, args=(f"python {response['file_path']}", result)
-             )
-             p.start()
-             p.join(timeout=self.timeout + 1)
-             if p.is_alive():
-                 p.kill()
-             # result = execute_command(f"python {response['file_path']}")
-         else:
-             # result = execute_command(self.has_test[task_description])
-             p = multiprocessing.Process(
-                 target=execute_command, args=(self.has_test[task_description], result)
-             )
-             p.start()
-             p.join(timeout=self.timeout + 1)
-             if p.is_alive():
-                 p.kill()
-         if not result:
-             result.append("Execution timed out.")
-         return [ExecutorMessage(content=result[0], sender="Code Tester")]
-
-     def step(
-         self,
-         agent: ExecutorAgent,
-         task_description: str,
-         solution: List[SolverMessage],
-         *args,
-         **kwargs,
-     ) -> Any:
-         solution = solution[0].content
-         os.makedirs("tmp", exist_ok=True)
-         self.write_to_file("tmp/main.py", solution)
-         manager = multiprocessing.Manager()
-         result = manager.list()
-         if task_description not in self.has_test:
-             response = agent.step(task_description, solution).content
-             self.write_to_file(response["file_path"], response["code"])
-             self.has_test[task_description] = f"python {response['file_path']}"
-             p = multiprocessing.Process(
-                 target=execute_command, args=(f"python {response['file_path']}", result)
-             )
-             p.start()
-             p.join(timeout=self.timeout + 1)
-             if p.is_alive():
-                 p.kill()
-             # result = execute_command(f"python {response['file_path']}")
-         else:
-             # result = execute_command(self.has_test[task_description])
-             p = multiprocessing.Process(
-                 target=execute_command, args=(self.has_test[task_description], result)
-             )
-             p.start()
-             p.join(timeout=self.timeout + 1)
-             if p.is_alive():
-                 p.kill()
-         if not result:
-             result.append("Execution timed out.")
-         return [ExecutorMessage(content=result[0], sender="Code Tester")]
-
-     def write_to_file(self, file_name, file_content):
-         # TODO: generalize this method to a common tool
-         try:
-             with open(file_name, "w") as f:
-                 f.write(file_content)
-                 f.flush()
-         except Exception:
-             logger.error(f"Failed to write to {file_name}")
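The start/join/kill sequence is duplicated verbatim in `astep` and `step`; a sketch of factoring it into one helper (a hypothetical refactor, not part of the deleted file). The `Manager().list()` matters here: a plain list would be lost when the child process is killed, while the manager proxy survives.

```python
import multiprocessing


def run_with_timeout(command: str, timeout: int = 10) -> str:
    """Run execute_command in a child process, killing it if it overruns."""
    result = multiprocessing.Manager().list()  # shared list outlives a killed child
    p = multiprocessing.Process(target=execute_command, args=(command, result))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    return result[0] if result else "Execution timed out."
```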
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/clickoutside/ClickOutside.js DELETED
@@ -1,2 +0,0 @@
- import ClickOutside from '../../../plugins/clickoutside.js'
- export default ClickOutside;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/SetItems.js DELETED
@@ -1,16 +0,0 @@
- var SetItems = function (items) {
-     if (items === undefined) {
-         this.items = [];
-     } else {
-         this.items = items;
-     }
-
-     var table = this.childrenMap.child;
-     table.setCellsCount(this.items.length);
-     table.updateTable(true);
-
-     this.resizeController();
-     return this;
- }
-
- export default SetItems;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/modal/Modal.js DELETED
@@ -1,2 +0,0 @@
- import { ModalBehavoir, Modal, ModalPromise, ModalClose } from '../../../plugins/modal.js';
- export { ModalBehavoir, Modal, ModalPromise, ModalClose };
 
spaces/AhmedM20/Email_Marketing_Content_Generator/app.py DELETED
@@ -1,68 +0,0 @@
- import pip
- import os
- SECRET_TOKEN = os.getenv("SECRET_TOKEN")
-
- def install(package):
-     if hasattr(pip, 'main'):
-         pip.main(['install', package])
-     else:
-         pip._internal.main(['install', package])
-
- # Install the Cohere SDK at startup (runtime-install pattern used on Spaces)
- if __name__ == '__main__':
-     install('cohere')
- import cohere
- import gradio as gr
- co = cohere.Client(SECRET_TOKEN)  # trial API key, read from the Space's secrets
-
- def write_email(tone="", goal="", industry="", text="", audience="", other=""):
-     if goal == "Other":
-         goal = other
-     if audience == "" and industry == "":
-         print(f'write 5 different {tone} emails to {goal} {text}')
-         Message = f'write 5 different {tone} emails to {goal} {text}'
-     elif audience == "":
-         print(f'write 5 different {tone} emails to {goal} in the {industry} industry {text}')
-         Message = f'write 5 different {tone} emails to {goal} in the {industry} industry {text}'
-     elif industry == "":
-         print(f'write 5 different {tone} emails for {audience} to {goal} {text}')
-         Message = f'write 5 different {tone} emails for {audience} to {goal} {text}'
-     else:
-         print(f'write 5 different {tone} emails for {audience} to {goal} in the {industry} industry {text}')
-         Message = f'write 5 different {tone} emails for {audience} to {goal} in the {industry} industry {text}'
-
-     response = co.generate(
-         model='command',
-         prompt=Message,
-         max_tokens=1208,
-         temperature=1,
-         k=0,
-         stop_sequences=[],
-         return_likelihoods='NONE')
-     return response.generations[0].text
-
-
- with gr.Blocks() as demo:
-     def other_field(choice):
-         if choice != "Other":
-             return gr.update(visible=False)
-         else:
-             return gr.update(visible=True)
-     gr.Markdown("Create your marketing emails with AI")
-     inp1 = gr.Radio(
-         ["Convince to buy a product", "Recover churned customers", "Teach a new concept", "Onboard users", "Share product updates", "Other"], value="Convince to buy a product", label="Campaign goal"
-     )
-     other = gr.Textbox(visible=False, placeholder="Please enter other text", label="Other:")
-     inp1.input(other_field, inp1, other)
-     inp2 = gr.Radio(
-         ["Formal", "Semi-formal", "Informal"], value="Formal", label="Brand tone"
-     )
-     inp3 = gr.Textbox(placeholder="Example: marketing agency", label="Industry")
-     inp4 = gr.Textbox(placeholder="Example: females aged between 18 and 30", label="Target audience")
-     inp5 = gr.Textbox(placeholder="Example: I am offering a 10-dollar discount for customers who cancelled their subscription and want to find a way to bring them back", label="Tell us more about the email you want to send")
-     btn = gr.Button("Generate 🚀")
-     out = gr.Textbox(label="Here are your 5 generated emails")
-     btn.click(fn=write_email, inputs=[inp2, inp1, inp3, inp5, inp4, other], outputs=out)
-
- demo.launch(debug=True)
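The four-way branch in `write_email` assembles the same prompt with optional fragments; a sketch of an equivalent single-path builder (a hypothetical helper, not part of the Space):

```python
def build_prompt(tone, goal, industry, text, audience):
    # Each optional fragment is appended only when its field is non-empty.
    parts = [f"write 5 different {tone} emails"]
    if audience:
        parts.append(f"for {audience}")
    parts.append(f"to {goal}")
    if industry:
        parts.append(f"in the {industry} industry")
    if text:
        parts.append(text)
    return " ".join(parts)
```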
 
spaces/AkitoP/umamusume_bert_vits2/text/japanese.py DELETED
@@ -1,704 +0,0 @@
- # Convert Japanese text to phonemes which is
- # compatible with Julius https://github.com/julius-speech/segmentation-kit
- import re
- import unicodedata
-
- from transformers import AutoTokenizer
-
- from text import punctuation, symbols
-
- try:
-     import MeCab
- except ImportError as e:
-     raise ImportError("Japanese requires mecab-python3 and unidic-lite.") from e
- from num2words import num2words
-
- _CONVRULES = [
-     # Conversion of 2 letters
-     "アァ/ a a",
-     "イィ/ i i",
-     "イェ/ i e",
-     "イャ/ y a",
-     "ウゥ/ u:",
-     "エェ/ e e",
-     "オォ/ o:",
-     "カァ/ k a:",
-     "キィ/ k i:",
-     "クゥ/ k u:",
-     "クャ/ ky a",
-     "クュ/ ky u",
-     "クョ/ ky o",
-     "ケェ/ k e:",
-     "コォ/ k o:",
-     "ガァ/ g a:",
-     "ギィ/ g i:",
-     "グゥ/ g u:",
-     "グャ/ gy a",
-     "グュ/ gy u",
-     "グョ/ gy o",
-     "ゲェ/ g e:",
-     "ゴォ/ g o:",
-     "サァ/ s a:",
-     "シィ/ sh i:",
-     "スゥ/ s u:",
-     "スャ/ sh a",
-     "スュ/ sh u",
-     "スョ/ sh o",
-     "セェ/ s e:",
-     "ソォ/ s o:",
-     "ザァ/ z a:",
-     "ジィ/ j i:",
-     "ズゥ/ z u:",
-     "ズャ/ zy a",
-     "ズュ/ zy u",
-     "ズョ/ zy o",
-     "ゼェ/ z e:",
-     "ゾォ/ z o:",
-     "タァ/ t a:",
-     "チィ/ ch i:",
-     "ツァ/ ts a",
-     "ツィ/ ts i",
-     "ツゥ/ ts u:",
-     "ツャ/ ch a",
-     "ツュ/ ch u",
-     "ツョ/ ch o",
-     "ツェ/ ts e",
-     "ツォ/ ts o",
-     "テェ/ t e:",
-     "トォ/ t o:",
-     "ダァ/ d a:",
-     "ヂィ/ j i:",
-     "ヅゥ/ d u:",
-     "ヅャ/ zy a",
-     "ヅュ/ zy u",
-     "ヅョ/ zy o",
-     "デェ/ d e:",
-     "ドォ/ d o:",
-     "ナァ/ n a:",
-     "ニィ/ n i:",
-     "ヌゥ/ n u:",
-     "ヌャ/ ny a",
-     "ヌュ/ ny u",
-     "ヌョ/ ny o",
-     "ネェ/ n e:",
-     "ノォ/ n o:",
-     "ハァ/ h a:",
-     "ヒィ/ h i:",
-     "フゥ/ f u:",
-     "フャ/ hy a",
-     "フュ/ hy u",
-     "フョ/ hy o",
-     "ヘェ/ h e:",
-     "ホォ/ h o:",
-     "バァ/ b a:",
-     "ビィ/ b i:",
-     "ブゥ/ b u:",
-     "フャ/ hy a",
-     "ブュ/ by u",
-     "フョ/ hy o",
-     "ベェ/ b e:",
-     "ボォ/ b o:",
-     "パァ/ p a:",
-     "ピィ/ p i:",
-     "プゥ/ p u:",
-     "プャ/ py a",
-     "プュ/ py u",
-     "プョ/ py o",
-     "ペェ/ p e:",
-     "ポォ/ p o:",
-     "マァ/ m a:",
-     "ミィ/ m i:",
-     "ムゥ/ m u:",
-     "ムャ/ my a",
-     "ムュ/ my u",
-     "ムョ/ my o",
-     "メェ/ m e:",
-     "モォ/ m o:",
-     "ヤァ/ y a:",
-     "ユゥ/ y u:",
-     "ユャ/ y a:",
-     "ユュ/ y u:",
-     "ユョ/ y o:",
-     "ヨォ/ y o:",
-     "ラァ/ r a:",
-     "リィ/ r i:",
-     "ルゥ/ r u:",
-     "ルャ/ ry a",
-     "ルュ/ ry u",
-     "ルョ/ ry o",
-     "レェ/ r e:",
-     "ロォ/ r o:",
-     "ワァ/ w a:",
-     "ヲォ/ o:",
-     "ディ/ d i",
-     "デェ/ d e:",
-     "デャ/ dy a",
-     "デュ/ dy u",
-     "デョ/ dy o",
-     "ティ/ t i",
-     "テェ/ t e:",
-     "テャ/ ty a",
-     "テュ/ ty u",
-     "テョ/ ty o",
-     "スィ/ s i",
-     "ズァ/ z u a",
-     "ズィ/ z i",
-     "ズゥ/ z u",
-     "ズャ/ zy a",
-     "ズュ/ zy u",
-     "ズョ/ zy o",
-     "ズェ/ z e",
-     "ズォ/ z o",
-     "キャ/ ky a",
-     "キュ/ ky u",
-     "キョ/ ky o",
-     "シャ/ sh a",
-     "シュ/ sh u",
-     "シェ/ sh e",
-     "ショ/ sh o",
-     "チャ/ ch a",
-     "チュ/ ch u",
-     "チェ/ ch e",
-     "チョ/ ch o",
-     "トゥ/ t u",
-     "トャ/ ty a",
-     "トュ/ ty u",
-     "トョ/ ty o",
-     "ドァ/ d o a",
-     "ドゥ/ d u",
-     "ドャ/ dy a",
-     "ドュ/ dy u",
-     "ドョ/ dy o",
-     "ドォ/ d o:",
-     "ニャ/ ny a",
-     "ニュ/ ny u",
-     "ニョ/ ny o",
-     "ヒャ/ hy a",
-     "ヒュ/ hy u",
-     "ヒョ/ hy o",
-     "ミャ/ my a",
-     "ミュ/ my u",
-     "ミョ/ my o",
-     "リャ/ ry a",
-     "リュ/ ry u",
-     "リョ/ ry o",
-     "ギャ/ gy a",
-     "ギュ/ gy u",
-     "ギョ/ gy o",
-     "ヂェ/ j e",
-     "ヂャ/ j a",
-     "ヂュ/ j u",
-     "ヂョ/ j o",
-     "ジェ/ j e",
-     "ジャ/ j a",
-     "ジュ/ j u",
-     "ジョ/ j o",
-     "ビャ/ by a",
-     "ビュ/ by u",
-     "ビョ/ by o",
-     "ピャ/ py a",
-     "ピュ/ py u",
-     "ピョ/ py o",
-     "ウァ/ u a",
-     "ウィ/ w i",
-     "ウェ/ w e",
-     "ウォ/ w o",
-     "ファ/ f a",
-     "フィ/ f i",
-     "フゥ/ f u",
-     "フャ/ hy a",
-     "フュ/ hy u",
-     "フョ/ hy o",
-     "フェ/ f e",
-     "フォ/ f o",
-     "ヴァ/ b a",
-     "ヴィ/ b i",
-     "ヴェ/ b e",
-     "ヴォ/ b o",
-     "ヴュ/ by u",
-     "アー/ a:",
-     "イー/ i:",
-     "ウー/ u:",
-     "エー/ e:",
-     "オー/ o:",
-     "カー/ k a:",
-     "キー/ k i:",
-     "クー/ k u:",
-     "ケー/ k e:",
-     "コー/ k o:",
-     "サー/ s a:",
-     "シー/ sh i:",
-     "スー/ s u:",
-     "セー/ s e:",
-     "ソー/ s o:",
-     "ター/ t a:",
-     "チー/ ch i:",
-     "ツー/ ts u:",
-     "テー/ t e:",
-     "トー/ t o:",
-     "ナー/ n a:",
-     "ニー/ n i:",
-     "ヌー/ n u:",
-     "ネー/ n e:",
-     "ノー/ n o:",
-     "ハー/ h a:",
-     "ヒー/ h i:",
-     "フー/ f u:",
-     "ヘー/ h e:",
-     "ホー/ h o:",
-     "マー/ m a:",
-     "ミー/ m i:",
-     "ムー/ m u:",
-     "メー/ m e:",
-     "モー/ m o:",
-     "ラー/ r a:",
-     "リー/ r i:",
-     "ルー/ r u:",
-     "レー/ r e:",
-     "ロー/ r o:",
-     "ガー/ g a:",
-     "ギー/ g i:",
-     "グー/ g u:",
-     "ゲー/ g e:",
-     "ゴー/ g o:",
-     "ザー/ z a:",
-     "ジー/ j i:",
-     "ズー/ z u:",
-     "ゼー/ z e:",
-     "ゾー/ z o:",
-     "ダー/ d a:",
-     "ヂー/ j i:",
-     "ヅー/ z u:",
-     "デー/ d e:",
-     "ドー/ d o:",
-     "バー/ b a:",
-     "ビー/ b i:",
-     "ブー/ b u:",
-     "ベー/ b e:",
-     "ボー/ b o:",
-     "パー/ p a:",
-     "ピー/ p i:",
-     "プー/ p u:",
-     "ペー/ p e:",
-     "ポー/ p o:",
-     "ヤー/ y a:",
-     "ユー/ y u:",
-     "ヨー/ y o:",
-     "ワー/ w a:",
-     "ヰー/ i:",
-     "ヱー/ e:",
-     "ヲー/ o:",
-     "ヴー/ b u:",
-     # Conversion of 1 letter
-     "ア/ a",
-     "イ/ i",
-     "ウ/ u",
-     "エ/ e",
-     "オ/ o",
-     "カ/ k a",
-     "キ/ k i",
-     "ク/ k u",
-     "ケ/ k e",
-     "コ/ k o",
-     "サ/ s a",
-     "シ/ sh i",
-     "ス/ s u",
-     "セ/ s e",
-     "ソ/ s o",
-     "タ/ t a",
-     "チ/ ch i",
-     "ツ/ ts u",
-     "テ/ t e",
-     "ト/ t o",
-     "ナ/ n a",
-     "ニ/ n i",
-     "ヌ/ n u",
-     "ネ/ n e",
-     "ノ/ n o",
-     "ハ/ h a",
-     "ヒ/ h i",
-     "フ/ f u",
-     "ヘ/ h e",
-     "ホ/ h o",
-     "マ/ m a",
-     "ミ/ m i",
-     "ム/ m u",
-     "メ/ m e",
-     "モ/ m o",
-     "ラ/ r a",
-     "リ/ r i",
-     "ル/ r u",
-     "レ/ r e",
-     "ロ/ r o",
-     "ガ/ g a",
-     "ギ/ g i",
-     "グ/ g u",
-     "ゲ/ g e",
-     "ゴ/ g o",
-     "ザ/ z a",
-     "ジ/ j i",
-     "ズ/ z u",
-     "ゼ/ z e",
-     "ゾ/ z o",
-     "ダ/ d a",
-     "ヂ/ j i",
-     "ヅ/ z u",
-     "デ/ d e",
-     "ド/ d o",
-     "バ/ b a",
-     "ビ/ b i",
-     "ブ/ b u",
-     "ベ/ b e",
-     "ボ/ b o",
-     "パ/ p a",
-     "ピ/ p i",
-     "プ/ p u",
-     "ペ/ p e",
-     "ポ/ p o",
-     "ヤ/ y a",
-     "ユ/ y u",
-     "ヨ/ y o",
-     "ワ/ w a",
-     "ヰ/ i",
-     "ヱ/ e",
-     "ヲ/ o",
-     "ン/ N",
-     "ッ/ q",
-     "ヴ/ b u",
-     "ー/:",  # this rule has no effect
-     # Try converting broken text
-     "ァ/ a",
-     "ィ/ i",
-     "ゥ/ u",
-     "ェ/ e",
-     "ォ/ o",
-     "ヮ/ w a",
-     "ォ/ o",
-     # Symbols
-     "、/ ,",
-     "。/ .",
-     "!/ !",
-     "?/ ?",
-     "・/ ,",
- ]
-
- _COLON_RX = re.compile(":+")
- _REJECT_RX = re.compile("[^ a-zA-Z:,.?]")
-
-
- def _makerulemap():
-     l = [tuple(x.split("/")) for x in _CONVRULES]
-     return tuple({k: v for k, v in l if len(k) == i} for i in (1, 2))
-
-
- _RULEMAP1, _RULEMAP2 = _makerulemap()
-
-
- def kata2phoneme(text: str) -> str:
-     """Convert katakana text to phonemes."""
-     text = text.strip()
-     res = []
-     while text:
-         if len(text) >= 2:
-             x = _RULEMAP2.get(text[:2])
-             if x is not None:
-                 text = text[2:]
-                 res += x.split(" ")[1:]
-                 continue
-         x = _RULEMAP1.get(text[0])
-         if x is not None:
-             text = text[1:]
-             res += x.split(" ")[1:]
-             continue
-         res.append(text[0])
-         text = text[1:]
-     # res = _COLON_RX.sub(":", res)
-     return res
-
-
- _KATAKANA = "".join(chr(ch) for ch in range(ord("ァ"), ord("ン") + 1))
- _HIRAGANA = "".join(chr(ch) for ch in range(ord("ぁ"), ord("ん") + 1))
- _HIRA2KATATRANS = str.maketrans(_HIRAGANA, _KATAKANA)
-
-
- def hira2kata(text: str) -> str:
-     text = text.translate(_HIRA2KATATRANS)
-     return text.replace("う゛", "ヴ")
-
-
- _SYMBOL_TOKENS = set(list("・、。?!"))
- _NO_YOMI_TOKENS = set(list("「」『』―()[][]"))
- _TAGGER = MeCab.Tagger()
-
-
- def text2kata(text: str) -> str:
-     parsed = _TAGGER.parse(text)
-     res = []
-     for line in parsed.split("\n"):
-         if line == "EOS":
-             break
-         parts = line.split("\t")
-
-         word, yomi = parts[0], parts[1]
-         if yomi:
-             res.append(yomi)
-         else:
-             if word in _SYMBOL_TOKENS:
-                 res.append(word)
-             elif word in ("っ", "ッ"):
-                 res.append("ッ")
-             elif word in _NO_YOMI_TOKENS:
-                 pass
-             else:
-                 res.append(word)
-     return hira2kata("".join(res))
-
-
- def text2sep_kata(text: str) -> (list, list):
-     parsed = _TAGGER.parse(text)
-     res = []
-     sep = []
-     for line in parsed.split("\n"):
-         if line == "EOS":
-             break
-         parts = line.split("\t")
-
-         word, yomi = parts[0], parts[1]
-         if yomi:
-             res.append(yomi)
-         else:
-             if word in _SYMBOL_TOKENS:
-                 res.append(word)
-             elif word in ("っ", "ッ"):
-                 res.append("ッ")
-             elif word in _NO_YOMI_TOKENS:
-                 pass
-             else:
-                 res.append(word)
-         sep.append(word)
-     return sep, [hira2kata(i) for i in res]
-
-
- _ALPHASYMBOL_YOMI = {
-     "#": "シャープ",
-     "%": "パーセント",
-     "&": "アンド",
-     "+": "プラス",
-     "-": "マイナス",
-     ":": "コロン",
-     ";": "セミコロン",
-     "<": "小なり",
-     "=": "イコール",
-     ">": "大なり",
-     "@": "アット",
-     "a": "エー",
-     "b": "ビー",
-     "c": "シー",
-     "d": "ディー",
-     "e": "イー",
-     "f": "エフ",
-     "g": "ジー",
-     "h": "エイチ",
-     "i": "アイ",
-     "j": "ジェー",
-     "k": "ケー",
-     "l": "エル",
-     "m": "エム",
-     "n": "エヌ",
-     "o": "オー",
-     "p": "ピー",
-     "q": "キュー",
-     "r": "アール",
-     "s": "エス",
-     "t": "ティー",
-     "u": "ユー",
-     "v": "ブイ",
-     "w": "ダブリュー",
-     "x": "エックス",
-     "y": "ワイ",
-     "z": "ゼット",
-     "α": "アルファ",
-     "β": "ベータ",
-     "γ": "ガンマ",
-     "δ": "デルタ",
-     "ε": "イプシロン",
-     "ζ": "ゼータ",
-     "η": "イータ",
-     "θ": "シータ",
-     "ι": "イオタ",
-     "κ": "カッパ",
-     "λ": "ラムダ",
-     "μ": "ミュー",
-     "ν": "ニュー",
-     "ξ": "クサイ",
-     "ο": "オミクロン",
-     "π": "パイ",
-     "ρ": "ロー",
-     "σ": "シグマ",
-     "τ": "タウ",
-     "υ": "ウプシロン",
-     "φ": "ファイ",
-     "χ": "カイ",
-     "ψ": "プサイ",
-     "ω": "オメガ",
- }
-
-
- _NUMBER_WITH_SEPARATOR_RX = re.compile("[0-9]{1,3}(,[0-9]{3})+")
- _CURRENCY_MAP = {"$": "ドル", "¥": "円", "£": "ポンド", "€": "ユーロ"}
- _CURRENCY_RX = re.compile(r"([$¥£€])([0-9.]*[0-9])")
- _NUMBER_RX = re.compile(r"[0-9]+(\.[0-9]+)?")
-
-
- def japanese_convert_numbers_to_words(text: str) -> str:
-     res = _NUMBER_WITH_SEPARATOR_RX.sub(lambda m: m[0].replace(",", ""), text)
-     res = _CURRENCY_RX.sub(lambda m: m[2] + _CURRENCY_MAP.get(m[1], m[1]), res)
-     res = _NUMBER_RX.sub(lambda m: num2words(m[0], lang="ja"), res)
-     return res
-
-
- def japanese_convert_alpha_symbols_to_words(text: str) -> str:
-     return "".join([_ALPHASYMBOL_YOMI.get(ch, ch) for ch in text.lower()])
-
-
- def japanese_text_to_phonemes(text: str) -> str:
-     """Convert Japanese text to phonemes."""
-     res = unicodedata.normalize("NFKC", text)
-     res = japanese_convert_numbers_to_words(res)
-     # res = japanese_convert_alpha_symbols_to_words(res)
-     res = text2kata(res)
-     res = kata2phoneme(res)
-     return res
-
-
- def is_japanese_character(char):
-     # Unicode ranges of the Japanese writing system
-     japanese_ranges = [
-         (0x3040, 0x309F),  # hiragana
-         (0x30A0, 0x30FF),  # katakana
-         (0x4E00, 0x9FFF),  # kanji (CJK Unified Ideographs)
-         (0x3400, 0x4DBF),  # CJK Extension A
-         (0x20000, 0x2A6DF),  # CJK Extension B
-         # add further CJK extension ranges as needed
-     ]
-
-     # convert the character's Unicode code point to an integer
-     char_code = ord(char)
-
-     # check whether the character falls in any of the Japanese ranges
-     for start, end in japanese_ranges:
-         if start <= char_code <= end:
-             return True
-
-     return False
-
-
- rep_map = {
-     ":": ",",
-     ";": ",",
-     ",": ",",
-     "。": ".",
-     "!": "!",
-     "?": "?",
-     "\n": ".",
-     "·": ",",
-     "、": ",",
-     "…": "...",
- }
-
-
- def replace_punctuation(text):
-     pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
-
-     replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
-
-     replaced_text = re.sub(
-         r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF"
-         + "".join(punctuation)
-         + r"]+",
-         "",
-         replaced_text,
-     )
-
-     return replaced_text
-
-
- def text_normalize(text):
-     res = unicodedata.normalize("NFKC", text)
-     res = japanese_convert_numbers_to_words(res)
-     # res = "".join([i for i in res if is_japanese_character(i)])
-     res = replace_punctuation(res)
-     return res
-
-
- def distribute_phone(n_phone, n_word):
-     phones_per_word = [0] * n_word
-     for task in range(n_phone):
-         min_tasks = min(phones_per_word)
-         min_index = phones_per_word.index(min_tasks)
-         phones_per_word[min_index] += 1
-     return phones_per_word
-
-
- tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3")
-
-
- def g2p(norm_text):
-     sep_text, sep_kata = text2sep_kata(norm_text)
-     sep_tokenized = [tokenizer.tokenize(i) for i in sep_text]
-     sep_phonemes = [kata2phoneme(i) for i in sep_kata]
-     # Sanity check: words MeCab does not recognize propagate all the way here
-     # and crash; so far only extremely rare characters trigger this.
-     for i in sep_phonemes:
-         for j in i:
-             assert j in symbols, (sep_text, sep_kata, sep_phonemes)
-
-     word2ph = []
-     for token, phoneme in zip(sep_tokenized, sep_phonemes):
-         phone_len = len(phoneme)
-         word_len = len(token)
-
-         aaa = distribute_phone(phone_len, word_len)
-         word2ph += aaa
-     phones = ["_"] + [j for i in sep_phonemes for j in i] + ["_"]
-     tones = [0 for i in phones]
-     word2ph = [1] + word2ph + [1]
-     return phones, tones, word2ph
-
- if __name__ == "__main__":
-     tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3")
-     text = "だったら私、スズカさんと同じチームに入りたいです! スズカさんの走りを毎日近くで、なんなら真横から見ていたいので!"
-     # print(_TAGGER.parse(text))
-     # nodes = [{"surface": "こんにちは", "pos": "感動詞:*:*:*", "pron": "コンニチワ", "c_type": "*", "c_form": "*", "accent_type": 0, "accent_con_type": "-1", "chain_flag": -1}]
-     nodes = [{"surface": "こんにちは", "pron": "コンニチワ", "pos": "感動詞:*:*:*"}]
-     from text.japanese_bert import get_bert_feature
-     import pyopenjtalk
-     from marine.predict import Predictor
-     from marine.utils.openjtalk_util import convert_njd_feature_to_marine_feature
-     text = text_normalize(text)
-     NJD_NODES = pyopenjtalk.run_frontend(text)
-     predictor = Predictor()
-     # important_info = [{"string":i["string"],"pron":i["pron"],"acc":i["acc"]}for i in pyopenjtalk.estimate_accent(NJD_NODES)]
-     print(text)
-
-     marine_feature = convert_njd_feature_to_marine_feature(NJD_NODES)
-     results = predictor.predict([marine_feature])
-     for mora, acc in zip(results["mora"][0], results["accent_status"][0]):
-         print(f"{mora}:{acc}")
-     # for i in pyopenjtalk.estimate_accent(NJD_NODES):
-     #     print(f"{i['string']}:{i['pron']}:{i['acc']}")
-     # info = pyopenjtalk.extract_fullcontext(text,run_marine=True)
-     # info_nomarine = pyopenjtalk.extract_fullcontext(text,run_marine=False)
-     # # nodes = pyopenjtalk
-     # # print(info)
-     # for i,j in zip(info,info_nomarine):
-     #     print(i)
-     #     print(j)
-     #     print("\n")
-     # predictor = Predictor()
-     # print(pyopenjtalk.estimate_accent(text))
-     # output = predictor.predict([nodes],accent_represent_mode="high_low")
-     # print(output)
-     # phones, tones, word2ph = g2p(text)
-     # bert = get_bert_feature(text, word2ph)
-
-     # print(phones, tones, word2ph, bert.shape)
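A minimal sketch of the normalization-to-phoneme path, assuming the module above is importable as `text.japanese`. It deliberately avoids `g2p`, which requires the local `./bert/bert-base-japanese-v3` tokenizer checkpoint:

```python
from text.japanese import text_normalize, text2kata, kata2phoneme

raw = "2023年、テストです!"
norm = text_normalize(raw)   # NFKC-normalize, spell out numbers, unify punctuation
kata = text2kata(norm)       # MeCab readings, hiragana folded to katakana
print(kata2phoneme(kata))    # greedy 2-then-1 character lookup in _CONVRULES
```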
 
spaces/Akmyradov/TurkmenTTSweSTT/uroman/README.md DELETED
@@ -1,165 +0,0 @@
- # uroman
-
- *uroman* is a *universal romanizer*. It converts text in any script to the Latin alphabet.
-
- Version: 1.2.8
- Release date: April 23, 2021
- Author: Ulf Hermjakob, USC Information Sciences Institute
-
-
- ### Usage
- ```bash
- $ uroman.pl [-l <lang-code>] [--chart] [--no-cache] < STDIN
-     where the optional <lang-code> is a 3-letter language code, e.g. ara, bel, bul, deu, ell, eng, fas,
-         grc, heb, kaz, kir, lav, lit, mkd, mkd2, oss, pnt, pus, rus, srp, srp2, tur, uig, ukr, yid.
-     --chart specifies chart output (in JSON format) to represent alternative romanizations.
-     --no-cache disables caching.
- ```
- ### Examples
- ```bash
- $ bin/uroman.pl < text/zho.txt
- $ bin/uroman.pl -l tur < text/tur.txt
- $ bin/uroman.pl -l heb --chart < text/heb.txt
- $ bin/uroman.pl < test/multi-script.txt > test/multi-script.uroman.txt
- ```
-
- Identifying the input as Arabic, Belarusian, Bulgarian, English, Farsi, German,
- Ancient Greek, Modern Greek, Pontic Greek, Hebrew, Kazakh, Kyrgyz, Latvian,
- Lithuanian, North Macedonian, Russian, Serbian, Turkish, Ukrainian, Uyghur or
- Yiddish will improve romanization for those languages, as some letters in those
- languages have different sound values from other languages using the same script
- (French, Russian, Hebrew respectively).
- No effect for other languages in this version.
-
- ### Bibliography
- Ulf Hermjakob, Jonathan May, and Kevin Knight. 2018. Out-of-the-box universal romanization tool uroman. In Proceedings of the 56th Annual Meeting of Association for Computational Linguistics, Demo Track. ACL-2018 Best Demo Paper Award. [Paper in ACL Anthology](https://www.aclweb.org/anthology/P18-4003) | [Poster](https://www.isi.edu/~ulf/papers/poster-uroman-acl2018.pdf) | [BibTex](https://www.aclweb.org/anthology/P18-4003.bib)
-
- ### Change History
- Changes in version 1.2.8
-  * Updated to Unicode 13.0 (2021), which supports several new scripts (10% larger UnicodeData.txt).
-  * Improved support for Georgian.
-  * Preserve various symbols (as opposed to mapping to the symbols' names).
-  * Various small improvements.
-
- Changes in version 1.2.7
-  * Improved support for Pashto.
-
- Changes in version 1.2.6
-  * Improved support for Ukrainian, Russian and Ogham (ancient Irish script).
-  * Added support for English Braille.
-  * Added alternative Romanization for North Macedonian and Serbian (mkd2/srp2)
-    reflecting a casual style that many native speakers of those languages use
-    when writing text in Latin script, e.g. non-accented single letters (e.g. "s")
-    rather than phonetically motivated combinations of letters (e.g. "sh").
-  * When a line starts with "::lcode xyz ", the new uroman version will switch to
-    that language for that line. This is used for the new reference test file.
-  * Various small improvements.
-
- Changes in version 1.2.5
-  * Improved support for Armenian and eight languages using Cyrillic scripts.
-    -- For Serbian and Macedonian, which are often written in both Cyrillic
-       and Latin scripts, uroman will map both official versions to the same
-       romanized text, e.g. both "Ниш" and "Niš" will be mapped to "Nish" (which
-       properly reflects the pronunciation of the city's name).
-       For both Serbian and Macedonian, casual writers often use a simplified
-       Latin form without diacritics, e.g. "s" to represent not only Cyrillic "с"
-       and Latin "s", but also "ш" or "š", even if this conflates "s" and "sh" and
-       other such pairs. The casual romanization can be simulated by using
-       alternative uroman language codes "srp2" and "mkd2", which romanize
-       both "Ниш" and "Niš" to "Nis" to reflect the casual Latin spelling.
-  * Various small improvements.
-
- Changes in version 1.2.4
-  * Bug-fix: cache mode generated two empty lines for each empty line.
-
- Changes in version 1.2
-  * Run-time improvement based on (1) token-based caching and (2) shortcut
-    romanization (identity) of ASCII strings for default 1-best (non-chart)
-    output. Speed-up by a factor of 10 for Bengali and Uyghur on medium and
-    large size texts.
-  * Incremental improvements for Farsi, Amharic, Russian, Hebrew and related
-    languages.
-  * Richer lattice structure (more alternatives) for "Romanization" of English
-    to support better matching to romanizations of other languages.
-    Changes output only when --chart option is specified. No change in output for
-    default 1-best output, which for ASCII characters is always the input string.
-
- Changes in version 1.1 (major upgrade)
-  * Offers chart output (in JSON format) to represent alternative romanizations.
-    -- Location of first character is defined to be "line: 1, start:0, end:0".
-  * Incremental improvements of Hebrew and Greek romanization; Chinese numbers.
-  * Improved web-interface at http://www.isi.edu/~ulf/uroman.html
-    -- Shows corresponding original and romanization text in red
-       when hovering over a text segment.
-    -- Shows alternative romanizations when hovering over romanized text
-       marked by dotted underline.
-    -- Added right-to-left script detection and improved display for right-to-left
-       script text (as determined line by line).
-    -- On-page support for some scripts that are often not pre-installed on users'
-       computers (Burmese, Egyptian, Klingon).
-
- Changes in version 1.0 (major upgrade)
-  * Upgraded principal internal data structure from string to lattice.
-  * Improvements mostly in vowelization of South and Southeast Asian languages.
-  * Vocalic 'r' more consistently treated as vowel (no additional vowel added).
-  * Repetition signs (Japanese/Chinese/Thai/Khmer/Lao) are mapped to superscript 2.
-  * Japanese Katakana middle dots now mapped to ASCII space.
-  * Tibetan intersyllabic mark now mapped to middle dot (U+00B7).
-  * Some corrections regarding analysis of Chinese numbers.
-  * Many more foreign diacritics and punctuation marks dropped or mapped to ASCII.
-  * Zero-width characters dropped, except line/sentence-initial byte order marks.
-  * Spaces normalized to ASCII space.
-  * Fixed bug that in some cases mapped signs (such as dagger or bullet) to their verbal descriptions.
-  * Tested against previous version of uroman with a new uroman visual diff tool.
-  * Almost an order of magnitude faster.
-
- Changes in version 0.7 (minor upgrade)
-  * Added script uroman-quick.pl for Arabic script languages, incl. Uyghur.
-    Much faster, pre-caching mapping of Arabic to Latin characters, simple greedy processing.
-    Will not convert material from non-Arabic blocks such as any (somewhat unusual) Cyrillic
-    or Chinese characters in Uyghur texts.
-
- Changes in version 0.6 (minor upgrade)
-  * Added support for two letter characters used in Uzbek:
-    (1) character "ʻ" ("modifier letter turned comma", which modifies preceding "g" and "u" letters)
-    (2) character "ʼ" ("modifier letter apostrophe", which Uzbek uses to mark a glottal stop).
-    Both are now mapped to "'" (plain ASCII apostrophe).
-  * Added support for Uyghur vowel characters such as "ې" (Arabic e) and "ۆ" (Arabic oe)
-    even when they are not preceded by "ئ" (yeh with hamza above).
-  * Added support for Arabic semicolon "؛", Arabic ligature forms for phrases such as "ﷺ"
-    ("sallallahou alayhe wasallam" = "prayer of God be upon him and his family and peace")
-  * Added robustness for Arabic letter presentation forms (initial/medial/final/isolated).
-    However, it is strongly recommended to normalize any presentation form Arabic letters
-    to their non-presentation form before calling uroman.
-  * Added force flush directive ($|=1;).
-
- Changes in version 0.5 (minor upgrade)
-  * Improvements for Uyghur (make sure to use language option: -l uig)
-
- Changes in version 0.4 (minor upgrade)
-  * Improvements for Thai (special cases for vowel/consonant reordering, e.g. for "sara o"; dropped some aspiration 'h's)
-  * Minor change for Arabic (added "alef+fathatan" = "an")
-
- New features in version 0.3
-  * Covers Mandarin (Chinese)
-  * Improved romanization for numerous languages
-  * Preserves capitalization (e.g. from Latin, Cyrillic, Greek scripts)
-  * Maps from native digits to Western numbers
-  * Faster for South Asian languages
-
- ### Other features
-  * Web interface: http://www.isi.edu/~ulf/uroman.html
-  * Vowelization is provided when locally computable, e.g. for many South Asian languages and Tibetan.
-
- ### Limitations
-  * The current version of uroman has a few limitations, some of which we plan to address in future versions.
-    For Japanese, *uroman* currently romanizes hiragana and katakana as expected, but kanji are interpreted as Chinese characters and romanized as such.
-    For Egyptian hieroglyphs, only single-sound phonetic characters and numbers are currently romanized.
-    For Linear B, only phonetic syllabic characters are romanized.
-    For some other extinct scripts such as cuneiform, no romanization is provided.
-  * A romanizer is not a full transliterator. For example, this version of
-    uroman does not vowelize text that lacks explicit vowelization such as
-    normal text in Arabic and Hebrew (without diacritics/points).
-
- ### Acknowledgments
- This research is based upon work supported in part by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via contract # FA8650-17-C-9116, and by research sponsored by Air Force Research Laboratory (AFRL) under agreement number FA8750-19-1-1000. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies, either expressed or implied, of ODNI, IARPA, Air Force Laboratory, DARPA, or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for governmental purposes notwithstanding any copyright annotation therein.
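Since uroman reads STDIN and writes STDOUT, it is easy to drive from another language. A sketch of a Python wrapper, assuming Perl is installed and `bin/uroman.pl` is present at that relative path (both assumptions, matching the repo layout described above):

```python
import subprocess


def uroman(text: str, lang: str = "") -> str:
    """Pipe text through uroman.pl and return the romanized result."""
    cmd = ["perl", "bin/uroman.pl"] + (["-l", lang] if lang else [])
    proc = subprocess.run(cmd, input=text, capture_output=True, text=True, check=True)
    return proc.stdout.strip()


print(uroman("Ниш", lang="srp"))  # expected output: "Nish"
```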
 
spaces/AlanMars/QYL-AI-Space/modules/models/base_model.py DELETED
@@ -1,592 +0,0 @@
- from __future__ import annotations
- from typing import TYPE_CHECKING, List
-
- import logging
- import json
- import commentjson as cjson
- import os
- import sys
- import requests
- import urllib3
- import traceback
- import pathlib
-
- from tqdm import tqdm
- import colorama
- from duckduckgo_search import ddg
- import asyncio
- import aiohttp
- from enum import Enum
-
- from ..presets import *
- from ..llama_func import *
- from ..utils import *
- from .. import shared
- from ..config import retrieve_proxy
-
-
- class ModelType(Enum):
-     Unknown = -1
-     OpenAI = 0
-     ChatGLM = 1
-     LLaMA = 2
-     XMChat = 3
-     StableLM = 4
-     MOSS = 5
-     YuanAI = 6
-
-     @classmethod
-     def get_type(cls, model_name: str):
-         model_type = None
-         model_name_lower = model_name.lower()
-         if "gpt" in model_name_lower:
-             model_type = ModelType.OpenAI
-         elif "chatglm" in model_name_lower:
-             model_type = ModelType.ChatGLM
-         elif "llama" in model_name_lower or "alpaca" in model_name_lower:
-             model_type = ModelType.LLaMA
-         elif "xmchat" in model_name_lower:
-             model_type = ModelType.XMChat
-         elif "stablelm" in model_name_lower:
-             model_type = ModelType.StableLM
-         elif "moss" in model_name_lower:
-             model_type = ModelType.MOSS
-         elif "yuanai" in model_name_lower:
-             model_type = ModelType.YuanAI
-         else:
-             model_type = ModelType.Unknown
-         return model_type
-
-
- class BaseLLMModel:
-     def __init__(
-         self,
-         model_name,
-         system_prompt="",
-         temperature=1.0,
-         top_p=1.0,
-         n_choices=1,
-         stop=None,
-         max_generation_token=None,
-         presence_penalty=0,
-         frequency_penalty=0,
-         logit_bias=None,
-         user="",
-     ) -> None:
-         self.history = []
-         self.all_token_counts = []
-         self.model_name = model_name
-         self.model_type = ModelType.get_type(model_name)
-         try:
-             self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
-         except KeyError:
-             self.token_upper_limit = DEFAULT_TOKEN_LIMIT
-         self.interrupted = False
-         self.system_prompt = system_prompt
-         self.api_key = None
-         self.need_api_key = False
-         self.single_turn = False
-
-         self.temperature = temperature
-         self.top_p = top_p
-         self.n_choices = n_choices
-         self.stop_sequence = stop
-         self.max_generation_token = max_generation_token  # was hard-coded to None, silently dropping the argument
-         self.presence_penalty = presence_penalty
-         self.frequency_penalty = frequency_penalty
-         self.logit_bias = logit_bias
-         self.user_identifier = user
-
-     def get_answer_stream_iter(self):
-         """stream predict, needs to be implemented by subclasses
-         conversations are stored in self.history, with the most recent question last, in OpenAI format
-         should return a generator that yields the next word (str) of the answer each time
-         """
-         logging.warning("stream predict not implemented, using at once predict instead")
-         response, _ = self.get_answer_at_once()
-         yield response
-
-     def get_answer_at_once(self):
-         """predict at once, needs to be implemented by subclasses
-         conversations are stored in self.history, with the most recent question last, in OpenAI format
-         Should return:
-             the answer (str)
-             total token count (int)
-         """
-         logging.warning("at once predict not implemented, using stream predict instead")
-         response_iter = self.get_answer_stream_iter()
-         count = 0
-         for response in response_iter:
-             count += 1
-         return response, sum(self.all_token_counts) + count
-
-     def billing_info(self):
-         """get billing information, implement if needed"""
-         logging.warning("billing info not implemented, using default")
-         return BILLING_NOT_APPLICABLE_MSG
-
-     def count_token(self, user_input):
-         """get token count from input, implement if needed"""
-         # logging.warning("token count not implemented, using default")
-         return len(user_input)
-
-     def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
-         def get_return_value():
-             return chatbot, status_text
-
-         status_text = i18n("开始实时传输回答……")
-         if fake_input:
-             chatbot.append((fake_input, ""))
-         else:
-             chatbot.append((inputs, ""))
-
-         user_token_count = self.count_token(inputs)
-         self.all_token_counts.append(user_token_count)
-         logging.debug(f"输入token计数: {user_token_count}")
-
-         stream_iter = self.get_answer_stream_iter()
-
-         for partial_text in stream_iter:
-             chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
-             self.all_token_counts[-1] += 1
-             status_text = self.token_message()
-             yield get_return_value()
-             if self.interrupted:
-                 self.recover()
-                 break
-         self.history.append(construct_assistant(partial_text))
-
-     def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
-         if fake_input:
-             chatbot.append((fake_input, ""))
-         else:
-             chatbot.append((inputs, ""))
-         if fake_input is not None:
-             user_token_count = self.count_token(fake_input)
-         else:
-             user_token_count = self.count_token(inputs)
-         self.all_token_counts.append(user_token_count)
-         ai_reply, total_token_count = self.get_answer_at_once()
-         self.history.append(construct_assistant(ai_reply))
-         if fake_input is not None:
-             self.history[-2] = construct_user(fake_input)
-         chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
-         if fake_input is not None:
-             self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))
-         else:
-             self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)
-         status_text = self.token_message()
-         return chatbot, status_text
-
-     def handle_file_upload(self, files, chatbot):
-         """if the model accepts multi modal input, implement this function"""
-         status = gr.Markdown.update()
-         if files:
-             construct_index(self.api_key, file_src=files)
-             status = "索引构建完成"
-         return gr.Files.update(), chatbot, status
-
-     def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
-         fake_inputs = None
-         display_append = []
-         limited_context = False
-         fake_inputs = real_inputs
-         if files:
-             from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
-             from llama_index.indices.query.schema import QueryBundle
-             from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-             from langchain.chat_models import ChatOpenAI
-             from llama_index import (
-                 GPTSimpleVectorIndex,
-                 ServiceContext,
-                 LangchainEmbedding,
-                 OpenAIEmbedding,
-             )
-             limited_context = True
-             msg = "加载索引中……"
-             logging.info(msg)
-             # yield chatbot + [(inputs, "")], msg
-             index = construct_index(self.api_key, file_src=files)
-             assert index is not None, "获取索引失败"
-             msg = "索引获取成功,生成回答中……"
-             logging.info(msg)
-             if local_embedding or self.model_type != ModelType.OpenAI:
-                 embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name="sentence-transformers/distiluse-base-multilingual-cased-v2"))
-             else:
-                 embed_model = OpenAIEmbedding()
-             # yield chatbot + [(inputs, "")], msg
-             with retrieve_proxy():
-                 prompt_helper = PromptHelper(
-                     max_input_size=4096,
-                     num_output=5,
-                     max_chunk_overlap=20,
-                     chunk_size_limit=600,
-                 )
-                 from llama_index import ServiceContext
-
-                 service_context = ServiceContext.from_defaults(
-                     prompt_helper=prompt_helper, embed_model=embed_model
-                 )
-                 query_object = GPTVectorStoreIndexQuery(
-                     index.index_struct,
-                     service_context=service_context,
-                     similarity_top_k=5,
-                     vector_store=index._vector_store,
-                     docstore=index._docstore,
-                     response_synthesizer=None
-                 )
-                 query_bundle = QueryBundle(real_inputs)
-                 nodes = query_object.retrieve(query_bundle)
-             reference_results = [n.node.text for n in nodes]
-             reference_results = add_source_numbers(reference_results, use_source=False)
-             display_append = add_details(reference_results)
-             display_append = "\n\n" + "".join(display_append)
-             real_inputs = (
-                 replace_today(PROMPT_TEMPLATE)
-                 .replace("{query_str}", real_inputs)
-                 .replace("{context_str}", "\n\n".join(reference_results))
-                 .replace("{reply_language}", reply_language)
-             )
-         elif use_websearch:
-             limited_context = True
-             search_results = ddg(real_inputs, max_results=5)
-             reference_results = []
-             for idx, result in enumerate(search_results):
-                 logging.debug(f"搜索结果{idx + 1}:{result}")
-                 domain_name = urllib3.util.parse_url(result["href"]).host
-                 reference_results.append([result["body"], result["href"]])
-                 display_append.append(
-                     # f"{idx+1}. [{domain_name}]({result['href']})\n"
-                     f"<li><a href=\"{result['href']}\" target=\"_blank\">{domain_name}</a></li>\n"
-                 )
-             reference_results = add_source_numbers(reference_results)
-             display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
-             real_inputs = (
-                 replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
-                 .replace("{query}", real_inputs)
-                 .replace("{web_results}", "\n\n".join(reference_results))
-                 .replace("{reply_language}", reply_language)
-             )
-         else:
-             display_append = ""
-         return limited_context, fake_inputs, display_append, real_inputs, chatbot
-
-     def predict(
-         self,
-         inputs,
-         chatbot,
-         stream=False,
-         use_websearch=False,
-         files=None,
-         reply_language="中文",
-         should_check_token_count=True,
-     ):  # repetition_penalty, top_k
-
-         status_text = "开始生成回答……"
-         logging.info(
-             "输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
-         )
-         if should_check_token_count:
-             yield chatbot + [(inputs, "")], status_text
-         if reply_language == "跟随问题语言(不稳定)":
-             reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
-
-         limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
-         yield chatbot + [(fake_inputs, "")], status_text
-
-         if (
-             self.need_api_key and
-             self.api_key is None
-             and not shared.state.multi_api_key
-         ):
-             status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
-             logging.info(status_text)
-             chatbot.append((inputs, ""))
-             if len(self.history) == 0:
-                 self.history.append(construct_user(inputs))
-                 self.history.append("")
-                 self.all_token_counts.append(0)
-             else:
-                 self.history[-2] = construct_user(inputs)
-             yield chatbot + [(inputs, "")], status_text
-             return
-         elif len(inputs.strip()) == 0:
-             status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
-             logging.info(status_text)
-             yield chatbot + [(inputs, "")], status_text
-             return
-
-         if self.single_turn:
-             self.history = []
-             self.all_token_counts = []
-         self.history.append(construct_user(inputs))
-
-         try:
-             if stream:
-                 logging.debug("使用流式传输")
-                 iter = self.stream_next_chatbot(
-                     inputs,
-                     chatbot,
-                     fake_input=fake_inputs,
-                     display_append=display_append,
-                 )
-                 for chatbot, status_text in iter:
-                     yield chatbot, status_text
-             else:
-                 logging.debug("不使用流式传输")
-                 chatbot, status_text = self.next_chatbot_at_once(
-                     inputs,
-                     chatbot,
-                     fake_input=fake_inputs,
-                     display_append=display_append,
-                 )
-                 yield chatbot, status_text
-         except Exception as e:
-             traceback.print_exc()
-             status_text = STANDARD_ERROR_MSG + str(e)
-             yield chatbot, status_text
-
-         if len(self.history) > 1 and self.history[-1]["content"] != inputs:
-             logging.info(
-                 "回答为:"
-                 + colorama.Fore.BLUE
-                 + f"{self.history[-1]['content']}"
-                 + colorama.Style.RESET_ALL
-             )
-
-         if limited_context:
-             # self.history = self.history[-4:]
-             # self.all_token_counts = self.all_token_counts[-2:]
-             self.history = []
-             self.all_token_counts = []
-
-         max_token = self.token_upper_limit - TOKEN_OFFSET
-
-         if sum(self.all_token_counts) > max_token and should_check_token_count:
-             count = 0
-             while (
-                 sum(self.all_token_counts)
-                 > self.token_upper_limit * REDUCE_TOKEN_FACTOR
-                 and sum(self.all_token_counts) > 0
-             ):
-                 count += 1
-                 del self.all_token_counts[0]
-                 del self.history[:2]
-             logging.info(status_text)
-             status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
-             yield chatbot, status_text
-
-         self.auto_save(chatbot)
-
-     def retry(
-         self,
-         chatbot,
-         stream=False,
-         use_websearch=False,
-         files=None,
-         reply_language="中文",
-     ):
-         logging.debug("重试中……")
-         if len(self.history) > 0:
-             inputs = self.history[-2]["content"]
-             del self.history[-2:]
-             self.all_token_counts.pop()
-         elif len(chatbot) > 0:
-             inputs = chatbot[-1][0]
-         else:
-             yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
-             return
-
-         iter = self.predict(
-             inputs,
-             chatbot,
-             stream=stream,
-             use_websearch=use_websearch,
-             files=files,
-             reply_language=reply_language,
-         )
-         for x in iter:
-             yield x
-         logging.debug("重试完毕")
-
-     # def reduce_token_size(self, chatbot):
-     #     logging.info("开始减少token数量……")
-     #     chatbot, status_text = self.next_chatbot_at_once(
-     #         summarize_prompt,
-     #         chatbot
-     #     )
-     #     max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
-     #     num_chat = find_n(self.all_token_counts, max_token_count)
-     #     logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
-     #     chatbot = chatbot[:-1]
-     #     self.history = self.history[-2*num_chat:] if num_chat > 0 else []
-     #     self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
-     #     msg = f"保留了最近{num_chat}轮对话"
-     #     logging.info(msg)
-     #     logging.info("减少token数量完毕")
-     #     return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
-
-     def interrupt(self):
-         self.interrupted = True
-
-     def recover(self):
-         self.interrupted = False
-
-     def set_token_upper_limit(self, new_upper_limit):
-         self.token_upper_limit = new_upper_limit
-         print(f"token上限设置为{new_upper_limit}")
-
-     def set_temperature(self, new_temperature):
-         self.temperature = new_temperature
-
-     def set_top_p(self, new_top_p):
-         self.top_p = new_top_p
-
-     def set_n_choices(self, new_n_choices):
-         self.n_choices = new_n_choices
-
-     def set_stop_sequence(self, new_stop_sequence: str):
-         new_stop_sequence = new_stop_sequence.split(",")
-         self.stop_sequence = new_stop_sequence
-
-     def set_max_tokens(self, new_max_tokens):
-         self.max_generation_token = new_max_tokens
-
-     def set_presence_penalty(self, new_presence_penalty):
-         self.presence_penalty = new_presence_penalty
-
-     def set_frequency_penalty(self, new_frequency_penalty):
-         self.frequency_penalty = new_frequency_penalty
-
-     def set_logit_bias(self, logit_bias):
-         logit_bias = logit_bias.split()
-         bias_map = {}
-         encoding = tiktoken.get_encoding("cl100k_base")
-         for line in logit_bias:
-             word, bias_amount = line.split(":")
-             if word:
-                 for token in encoding.encode(word):
-                     bias_map[token] = float(bias_amount)
-         self.logit_bias = bias_map
-
-     def set_user_identifier(self, new_user_identifier):
-         self.user_identifier = new_user_identifier
-
-     def set_system_prompt(self, new_system_prompt):
-         self.system_prompt = new_system_prompt
-
-     def set_key(self, new_access_key):
-         self.api_key = new_access_key.strip()
-         msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
-         logging.info(msg)
-         return self.api_key, msg
-
-     def set_single_turn(self, new_single_turn):
-         self.single_turn = new_single_turn
-
-     def reset(self):
-         self.history = []
-         self.all_token_counts = []
-         self.interrupted = False
-         pathlib.Path(os.path.join(HISTORY_DIR, self.user_identifier, new_auto_history_filename(os.path.join(HISTORY_DIR, self.user_identifier)))).touch()
-         return [], self.token_message([0])
-
-     def delete_first_conversation(self):
-         if self.history:
-             del self.history[:2]
-             del self.all_token_counts[0]
-         return self.token_message()
-
-     def delete_last_conversation(self, chatbot):
-         if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
-             msg = "由于包含报错信息,只删除chatbot记录"
-             chatbot.pop()
-             return chatbot, self.history
-         if len(self.history) > 0:
-             self.history.pop()
-             self.history.pop()
-         if len(chatbot) > 0:
-             msg = "删除了一组chatbot对话"
-             chatbot.pop()
-         if len(self.all_token_counts) > 0:
-             msg = "删除了一组对话的token计数记录"
-             self.all_token_counts.pop()
-         msg = "删除了一组对话"
-         return chatbot, msg
-
-     def token_message(self, token_lst=None):
-         if token_lst is None:
-             token_lst = self.all_token_counts
-         token_sum = 0
-         for i in range(len(token_lst)):
-             token_sum += sum(token_lst[: i + 1])
-         return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"
-
-     def save_chat_history(self, filename, chatbot, user_name):
-         if filename == "":
-             return
-         if not filename.endswith(".json"):
-             filename += ".json"
-         return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
-
-     def auto_save(self, chatbot):
-         history_file_path = get_history_filepath(self.user_identifier)
-         save_file(history_file_path, self.system_prompt, self.history, chatbot, self.user_identifier)
-
-     def export_markdown(self, filename, chatbot, user_name):
-         if filename == "":
-             return
-         if not filename.endswith(".md"):
-             filename += ".md"
-         return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
-
-     def load_chat_history(self, filename, user_name):
-         logging.debug(f"{user_name} 加载对话历史中……")
-         logging.info(f"filename: {filename}")
-         if type(filename) != str and filename is not None:
-             filename = filename.name
-         try:
-             if "/" not in filename:
-                 history_file_path = os.path.join(HISTORY_DIR, user_name, filename)
-             else:
-                 history_file_path = filename
-             with open(history_file_path, "r") as f:
-                 json_s = json.load(f)
-             try:
-                 if type(json_s["history"][0]) == str:
-                     logging.info("历史记录格式为旧版,正在转换……")
-                     new_history = []
-                     for index, item in enumerate(json_s["history"]):
-                         if index % 2 == 0:
-                             new_history.append(construct_user(item))
-                         else:
-                             new_history.append(construct_assistant(item))
-                     json_s["history"] = new_history
-                     logging.info(new_history)
-             except Exception:
-                 pass
-             logging.debug(f"{user_name} 加载对话历史完毕")
-             self.history = json_s["history"]
-             return os.path.basename(filename), json_s["system"], json_s["chatbot"]
-         except Exception:
-             # no chat history found, or the history failed to parse
-             logging.info(f"没有找到对话历史记录 {filename}")
-             return gr.update(), self.system_prompt, gr.update()
-
-     def auto_load(self):
-         if self.user_identifier == "":
-             self.reset()
-             return self.system_prompt, gr.update()
-         history_file_path = get_history_filepath(self.user_identifier)
-         filename, system_prompt, chatbot = self.load_chat_history(history_file_path, self.user_identifier)
-         return system_prompt, chatbot
-
-     def like(self):
-         """like the last response, implement if needed"""
-         return gr.update()
-
-     def dislike(self):
-         """dislike the last response, implement if needed"""
-         return gr.update()
spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/test_eval_wer.py DELETED
File without changes
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py DELETED
@@ -1,38 +0,0 @@
- _base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
-
- model = dict(
-     pretrained='open-mmlab://detectron2/resnet50_caffe',
-     backbone=dict(
-         norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe'))
-
- img_norm_cfg = dict(
-     mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
-     dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     train=dict(pipeline=train_pipeline),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/resnext.py DELETED
@@ -1,153 +0,0 @@
- import math
-
- from mmcv.cnn import build_conv_layer, build_norm_layer
-
- from ..builder import BACKBONES
- from ..utils import ResLayer
- from .resnet import Bottleneck as _Bottleneck
- from .resnet import ResNet
-
-
- class Bottleneck(_Bottleneck):
-     expansion = 4
-
-     def __init__(self,
-                  inplanes,
-                  planes,
-                  groups=1,
-                  base_width=4,
-                  base_channels=64,
-                  **kwargs):
-         """Bottleneck block for ResNeXt.
-
-         If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
-         it is "caffe", the stride-two layer is the first 1x1 conv layer.
-         """
-         super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
-
-         if groups == 1:
-             width = self.planes
-         else:
-             width = math.floor(self.planes *
-                                (base_width / base_channels)) * groups
-
-         self.norm1_name, norm1 = build_norm_layer(
-             self.norm_cfg, width, postfix=1)
-         self.norm2_name, norm2 = build_norm_layer(
-             self.norm_cfg, width, postfix=2)
-         self.norm3_name, norm3 = build_norm_layer(
-             self.norm_cfg, self.planes * self.expansion, postfix=3)
-
-         self.conv1 = build_conv_layer(
-             self.conv_cfg,
-             self.inplanes,
-             width,
-             kernel_size=1,
-             stride=self.conv1_stride,
-             bias=False)
-         self.add_module(self.norm1_name, norm1)
-         fallback_on_stride = False
-         self.with_modulated_dcn = False
-         if self.with_dcn:
-             fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
-         if not self.with_dcn or fallback_on_stride:
-             self.conv2 = build_conv_layer(
-                 self.conv_cfg,
-                 width,
-                 width,
-                 kernel_size=3,
-                 stride=self.conv2_stride,
-                 padding=self.dilation,
-                 dilation=self.dilation,
-                 groups=groups,
-                 bias=False)
-         else:
-             assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
-             self.conv2 = build_conv_layer(
-                 self.dcn,
-                 width,
-                 width,
-                 kernel_size=3,
-                 stride=self.conv2_stride,
-                 padding=self.dilation,
-                 dilation=self.dilation,
-                 groups=groups,
-                 bias=False)
-
-         self.add_module(self.norm2_name, norm2)
-         self.conv3 = build_conv_layer(
-             self.conv_cfg,
-             width,
-             self.planes * self.expansion,
-             kernel_size=1,
-             bias=False)
-         self.add_module(self.norm3_name, norm3)
-
-         if self.with_plugins:
-             self._del_block_plugins(self.after_conv1_plugin_names +
-                                     self.after_conv2_plugin_names +
-                                     self.after_conv3_plugin_names)
-             self.after_conv1_plugin_names = self.make_block_plugins(
-                 width, self.after_conv1_plugins)
-             self.after_conv2_plugin_names = self.make_block_plugins(
-                 width, self.after_conv2_plugins)
-             self.after_conv3_plugin_names = self.make_block_plugins(
-                 self.planes * self.expansion, self.after_conv3_plugins)
-
-     def _del_block_plugins(self, plugin_names):
-         """delete plugins for block if exist.
-
-         Args:
-             plugin_names (list[str]): List of plugins name to delete.
-         """
-         assert isinstance(plugin_names, list)
-         for plugin_name in plugin_names:
-             del self._modules[plugin_name]
-
-
- @BACKBONES.register_module()
- class ResNeXt(ResNet):
-     """ResNeXt backbone.
-
-     Args:
-         depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
-         in_channels (int): Number of input image channels. Default: 3.
-         num_stages (int): Resnet stages. Default: 4.
-         groups (int): Group of resnext.
-         base_width (int): Base width of resnext.
-         strides (Sequence[int]): Strides of the first block of each stage.
-         dilations (Sequence[int]): Dilation of each stage.
-         out_indices (Sequence[int]): Output from which stages.
-         style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
-             layer is the 3x3 conv layer, otherwise the stride-two layer is
-             the first 1x1 conv layer.
-         frozen_stages (int): Stages to be frozen (all param fixed). -1 means
-             not freezing any parameters.
-         norm_cfg (dict): dictionary to construct and config norm layer.
-         norm_eval (bool): Whether to set norm layers to eval mode, namely,
-             freeze running stats (mean and var). Note: Effect on Batch Norm
-             and its variants only.
-         with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-             memory while slowing down the training speed.
-         zero_init_residual (bool): whether to use zero init for last norm layer
-             in resblocks to let them behave as identity.
-     """
-
-     arch_settings = {
-         50: (Bottleneck, (3, 4, 6, 3)),
-         101: (Bottleneck, (3, 4, 23, 3)),
-         152: (Bottleneck, (3, 8, 36, 3))
-     }
-
-     def __init__(self, groups=1, base_width=4, **kwargs):
-         self.groups = groups
-         self.base_width = base_width
-         super(ResNeXt, self).__init__(**kwargs)
-
-     def make_res_layer(self, **kwargs):
-         """Pack all blocks in a stage into a ``ResLayer``"""
-         return ResLayer(
-             groups=self.groups,
-             base_width=self.base_width,
-             base_channels=self.base_channels,
-             **kwargs)
spaces/AndySAnker/DeepStruc/app.py DELETED
@@ -1,94 +0,0 @@
- import streamlit as st
- import io, os, argparse, torch, random
- import pytorch_lightning as pl
- import numpy as np
- from predict import main
- from tools.utils import plot_ls
-
- seed = 37
- torch.manual_seed(seed)
- pl.seed_everything(seed)
- torch.manual_seed(seed)
- np.random.seed(seed)
- random.seed(seed)
-
- st.title('DeepStruc')
-
- st.write('Welcome to DeepStruc that is a Deep Generative Model which has been trained to solve a mono-metallic structure (<200 atoms) based on a PDF!')
- st.write('Upload a PDF to use DeepStruc to predict the structure.')
-
-
- # Define the file upload widget
- pdf_file = st.file_uploader("Upload PDF file in .gr format", type=["gr"])
-
- # Define the form to get the other parameters
- num_structures = st.number_input("Number of structures to generate", min_value=1, max_value=100, value=10)
- structure_index = st.number_input("Index of structure to visualize", min_value=0, value=3)
- sigma = st.number_input("Standard deviation for sampling", min_value=0.1, value=3.0)
-
- # Define parser
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- args = parser.parse_args()
- args.num_samples = num_structures
- args.index_plot = structure_index
- args.sigma = sigma
- # Fixed for DeepStruc app
- args.model = 'DeepStruc'
- args.save_path = './'
-
- # Define the predict button and its behavior
- if st.button("Generate structures"):
-     if pdf_file is None:
-         st.warning("Please upload a PDF file.")
-     else:
-         # Get the contents of the file as bytes
-         file_bytes = pdf_file.read()
-
-         # Save the contents of the file to disk
-         with open("uploaded_file.gr", "wb") as f:
-             f.write(file_bytes)
-
-         df, index_highlight, these_cords = main(args)
-
-         # Plot the latent space
-         fig = plot_ls(df, index_highlight)
-         st.pyplot(fig)
-         st.write('**The two-dimensional latent space with location of the input.** The size of the points relates to the size of the embedded structure. Each point is coloured after its structure type, FCC (light blue), octahedral (dark grey), decahedral (orange), BCC (green), icosahedral (dark blue), HCP (pink) and SC (red). Each point in the latent space corresponds to a structure based on its simulated PDF. Test data point are plotted on top of the training and validation data, which is made semi-transparent. The latent space locations of the reconstructed structures from the input are shown with black markers and the specific reconstructed structure that is shown in the next box is shown with a black and white marker.')
-
-         # Define the save directory and file name
-         file_name = "DeepStruc_prediction.xyz"
-
-         # Define a download button to download the file
-         def download_button(file_name, button_text):
-             with open(file_name, "rb") as f:
-                 bytes = f.read()
-             st.download_button(
-                 label=button_text,
-                 data=bytes,
-                 file_name=file_name,
-                 mime="text/xyz",)
-
-         # Save the coordinates to a file and display a download button
-         np.savetxt(file_name, these_cords, fmt="%s")
-         download_button(file_name, "Download XYZ file")
-
-
-
- st.subheader('Cite')
-
- st.write('If you use DeepStruc, our code or results, please consider citing our papers. Thanks in advance!')
-
- st.write('DeepStruc: Towards structure solution from pair distribution function data using deep generative models **2023** (https://pubs.rsc.org/en/content/articlehtml/2022/dd/d2dd00086e)')
- st.write('Characterising the atomic structure of mono-metallic nanoparticles from x-ray scattering data using conditional generative models **2020** (https://chemrxiv.org/engage/chemrxiv/article-details/60c74dd1842e6514f2db3527)')
-
- st.subheader('LICENSE')
-
- st.write('This project is licensed under the Apache License Version 2.0, January 2004 - see the LICENSE file at https://github.com/EmilSkaaning/DeepStruc/blob/main/LICENSE.md for details.')
- st.write("")
-
- st.subheader('Github')
- st.write('https://github.com/EmilSkaaning/DeepStruc')
-
- st.subheader('Questions')
-
spaces/Aniquel/WizApp_Code_Generator/app.py DELETED
@@ -1,31 +0,0 @@
- import gradio as gr
- import openai
- import os
-
- openai.api_key = os.getenv("OPENAI_API_KEY")
-
-
- def generate_response(text):
-     prompt = f"Code generation:\n\n```python\n{text}\n```"
-     response = openai.Completion.create(
-         model="gpt-3.5-turbo",
-         prompt=prompt,
-         max_tokens=3000,
-         n=1,
-         stop=None,
-         temperature=0.2,
-     )
-     message = response.choices[0].text.strip()
-     return message
-
- iface = gr.Interface(
-     fn=generate_response,
-     inputs=gr.inputs.Textbox(label="Enter your code here"),
-     outputs=gr.outputs.Textbox(label="Chatbot's response"),
-     title="WizApp Code Generation",
-     description="Use AI to generate code based on your input",
-     theme="default"
- )
-
- if __name__ == "__main__":
-     iface.launch()
spaces/Ariharasudhan/YoloV5/utils/segment/general.py DELETED
@@ -1,137 +0,0 @@
- import cv2
- import numpy as np
- import torch
- import torch.nn.functional as F
-
-
- def crop_mask(masks, boxes):
-     """
-     "Crop" predicted masks by zeroing out everything not in the predicted bbox.
-     Vectorized by Chong (thanks Chong).
-
-     Args:
-         - masks should be a size [h, w, n] tensor of masks
-         - boxes should be a size [n, 4] tensor of bbox coords in relative point form
-     """
-
-     n, h, w = masks.shape
-     x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(1,1,n)
-     r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # rows shape(1,w,1)
-     c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # cols shape(h,1,1)
-
-     return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
-
-
- def process_mask_upsample(protos, masks_in, bboxes, shape):
-     """
-     Crop after upsample.
-     proto_out: [mask_dim, mask_h, mask_w]
-     out_masks: [n, mask_dim], n is number of masks after nms
-     bboxes: [n, 4], n is number of masks after nms
-     shape:input_image_size, (h, w)
-
-     return: h, w, n
-     """
-
-     c, mh, mw = protos.shape  # CHW
-     masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
-     masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
-     masks = crop_mask(masks, bboxes)  # CHW
-     return masks.gt_(0.5)
-
-
- def process_mask(protos, masks_in, bboxes, shape, upsample=False):
-     """
-     Crop before upsample.
-     proto_out: [mask_dim, mask_h, mask_w]
-     out_masks: [n, mask_dim], n is number of masks after nms
-     bboxes: [n, 4], n is number of masks after nms
-     shape:input_image_size, (h, w)
-
-     return: h, w, n
-     """
-
-     c, mh, mw = protos.shape  # CHW
-     ih, iw = shape
-     masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # CHW
-
-     downsampled_bboxes = bboxes.clone()
-     downsampled_bboxes[:, 0] *= mw / iw
-     downsampled_bboxes[:, 2] *= mw / iw
-     downsampled_bboxes[:, 3] *= mh / ih
-     downsampled_bboxes[:, 1] *= mh / ih
-
-     masks = crop_mask(masks, downsampled_bboxes)  # CHW
-     if upsample:
-         masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
-     return masks.gt_(0.5)
-
-
- def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
-     """
-     img1_shape: model input shape, [h, w]
-     img0_shape: origin pic shape, [h, w, 3]
-     masks: [h, w, num]
-     """
-     # Rescale coordinates (xyxy) from im1_shape to im0_shape
-     if ratio_pad is None:  # calculate from im0_shape
-         gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain = old / new
-         pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
-     else:
-         pad = ratio_pad[1]
-     top, left = int(pad[1]), int(pad[0])  # y, x
-     bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])
-
-     if len(masks.shape) < 2:
-         raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
-     masks = masks[top:bottom, left:right]
-     # masks = masks.permute(2, 0, 1).contiguous()
-     # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]
-     # masks = masks.permute(1, 2, 0).contiguous()
-     masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
-
-     if len(masks.shape) == 2:
-         masks = masks[:, :, None]
-     return masks
-
-
- def mask_iou(mask1, mask2, eps=1e-7):
-     """
-     mask1: [N, n] m1 means number of predicted objects
-     mask2: [M, n] m2 means number of gt objects
-     Note: n means image_w x image_h
-
-     return: masks iou, [N, M]
-     """
-     intersection = torch.matmul(mask1, mask2.t()).clamp(0)
-     union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection  # (area1 + area2) - intersection
-     return intersection / (union + eps)
-
-
- def masks_iou(mask1, mask2, eps=1e-7):
-     """
-     mask1: [N, n] m1 means number of predicted objects
-     mask2: [N, n] m2 means number of gt objects
-     Note: n means image_w x image_h
-
-     return: masks iou, (N, )
-     """
-     intersection = (mask1 * mask2).sum(1).clamp(0)  # (N, )
-     union = (mask1.sum(1) + mask2.sum(1))[None] - intersection  # (area1 + area2) - intersection
-     return intersection / (union + eps)
-
-
- def masks2segments(masks, strategy='largest'):
-     # Convert masks(n,160,160) into segments(n,xy)
-     segments = []
-     for x in masks.int().cpu().numpy().astype('uint8'):
-         c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
-         if c:
-             if strategy == 'concat':  # concatenate all segments
-                 c = np.concatenate([x.reshape(-1, 2) for x in c])
-             elif strategy == 'largest':  # select largest segment
-                 c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
-         else:
-             c = np.zeros((0, 2))  # no segments found
-         segments.append(c.astype('float32'))
-     return segments
spaces/Artples/llama-2-7b-chat/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: llama-2-7b-chat
- emoji: 🚀
- colorFrom: green
- colorTo: green
- sdk: gradio
- sdk_version: 3.37.0
- app_file: app.py
- pinned: true
- duplicated_from: Artples/llama2-7b-chat
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/x_user_defined.py DELETED
@@ -1,325 +0,0 @@
- # coding: utf-8
- """
-
-     webencodings.x_user_defined
-     ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-     An implementation of the x-user-defined encoding.
-
-     :copyright: Copyright 2012 by Simon Sapin
-     :license: BSD, see LICENSE for details.
-
- """
-
- from __future__ import unicode_literals
-
- import codecs
-
-
- ### Codec APIs
-
- class Codec(codecs.Codec):
-
-     def encode(self, input, errors='strict'):
-         return codecs.charmap_encode(input, errors, encoding_table)
-
-     def decode(self, input, errors='strict'):
-         return codecs.charmap_decode(input, errors, decoding_table)
-
-
- class IncrementalEncoder(codecs.IncrementalEncoder):
-     def encode(self, input, final=False):
-         return codecs.charmap_encode(input, self.errors, encoding_table)[0]
-
-
- class IncrementalDecoder(codecs.IncrementalDecoder):
-     def decode(self, input, final=False):
-         return codecs.charmap_decode(input, self.errors, decoding_table)[0]
-
-
- class StreamWriter(Codec, codecs.StreamWriter):
-     pass
-
-
- class StreamReader(Codec, codecs.StreamReader):
-     pass
-
-
- ### encodings module API
-
- codec_info = codecs.CodecInfo(
-     name='x-user-defined',
-     encode=Codec().encode,
-     decode=Codec().decode,
-     incrementalencoder=IncrementalEncoder,
-     incrementaldecoder=IncrementalDecoder,
-     streamreader=StreamReader,
-     streamwriter=StreamWriter,
- )
-
-
- ### Decoding Table
-
- # Python 3:
- # for c in range(256): print('    %r' % chr(c if c < 128 else c + 0xF700))
- decoding_table = (
-     '\x00'
-     '\x01'
-     '\x02'
-     '\x03'
-     '\x04'
-     '\x05'
-     '\x06'
-     '\x07'
-     '\x08'
-     '\t'
-     '\n'
-     '\x0b'
-     '\x0c'
-     '\r'
-     '\x0e'
-     '\x0f'
-     '\x10'
-     '\x11'
-     '\x12'
-     '\x13'
-     '\x14'
-     '\x15'
-     '\x16'
-     '\x17'
-     '\x18'
-     '\x19'
-     '\x1a'
-     '\x1b'
-     '\x1c'
-     '\x1d'
-     '\x1e'
-     '\x1f'
-     ' '
-     '!'
-     '"'
-     '#'
-     '$'
-     '%'
-     '&'
-     "'"
-     '('
-     ')'
-     '*'
-     '+'
-     ','
-     '-'
-     '.'
-     '/'
-     '0'
-     '1'
-     '2'
-     '3'
-     '4'
-     '5'
-     '6'
-     '7'
-     '8'
-     '9'
-     ':'
-     ';'
-     '<'
-     '='
-     '>'
-     '?'
-     '@'
-     'A'
-     'B'
-     'C'
-     'D'
-     'E'
-     'F'
-     'G'
-     'H'
-     'I'
-     'J'
-     'K'
-     'L'
-     'M'
-     'N'
-     'O'
-     'P'
-     'Q'
-     'R'
-     'S'
-     'T'
-     'U'
-     'V'
-     'W'
-     'X'
-     'Y'
-     'Z'
-     '['
-     '\\'
-     ']'
-     '^'
-     '_'
-     '`'
-     'a'
-     'b'
-     'c'
-     'd'
-     'e'
-     'f'
-     'g'
-     'h'
-     'i'
-     'j'
-     'k'
-     'l'
-     'm'
-     'n'
-     'o'
-     'p'
-     'q'
-     'r'
-     's'
-     't'
-     'u'
-     'v'
-     'w'
-     'x'
-     'y'
-     'z'
-     '{'
-     '|'
-     '}'
-     '~'
-     '\x7f'
-     '\uf780'
-     '\uf781'
-     '\uf782'
-     '\uf783'
-     '\uf784'
-     '\uf785'
-     '\uf786'
-     '\uf787'
-     '\uf788'
-     '\uf789'
-     '\uf78a'
-     '\uf78b'
-     '\uf78c'
-     '\uf78d'
-     '\uf78e'
-     '\uf78f'
-     '\uf790'
-     '\uf791'
-     '\uf792'
-     '\uf793'
-     '\uf794'
-     '\uf795'
-     '\uf796'
-     '\uf797'
-     '\uf798'
-     '\uf799'
-     '\uf79a'
-     '\uf79b'
-     '\uf79c'
-     '\uf79d'
-     '\uf79e'
-     '\uf79f'
-     '\uf7a0'
-     '\uf7a1'
-     '\uf7a2'
-     '\uf7a3'
-     '\uf7a4'
-     '\uf7a5'
-     '\uf7a6'
-     '\uf7a7'
-     '\uf7a8'
-     '\uf7a9'
-     '\uf7aa'
-     '\uf7ab'
-     '\uf7ac'
-     '\uf7ad'
-     '\uf7ae'
-     '\uf7af'
-     '\uf7b0'
-     '\uf7b1'
-     '\uf7b2'
-     '\uf7b3'
-     '\uf7b4'
-     '\uf7b5'
-     '\uf7b6'
-     '\uf7b7'
-     '\uf7b8'
-     '\uf7b9'
-     '\uf7ba'
-     '\uf7bb'
-     '\uf7bc'
-     '\uf7bd'
-     '\uf7be'
-     '\uf7bf'
-     '\uf7c0'
-     '\uf7c1'
-     '\uf7c2'
-     '\uf7c3'
-     '\uf7c4'
-     '\uf7c5'
-     '\uf7c6'
-     '\uf7c7'
-     '\uf7c8'
-     '\uf7c9'
-     '\uf7ca'
-     '\uf7cb'
-     '\uf7cc'
-     '\uf7cd'
-     '\uf7ce'
-     '\uf7cf'
-     '\uf7d0'
-     '\uf7d1'
-     '\uf7d2'
-     '\uf7d3'
-     '\uf7d4'
-     '\uf7d5'
-     '\uf7d6'
-     '\uf7d7'
-     '\uf7d8'
-     '\uf7d9'
-     '\uf7da'
-     '\uf7db'
-     '\uf7dc'
-     '\uf7dd'
-     '\uf7de'
-     '\uf7df'
-     '\uf7e0'
-     '\uf7e1'
-     '\uf7e2'
-     '\uf7e3'
-     '\uf7e4'
-     '\uf7e5'
-     '\uf7e6'
-     '\uf7e7'
-     '\uf7e8'
-     '\uf7e9'
-     '\uf7ea'
-     '\uf7eb'
-     '\uf7ec'
-     '\uf7ed'
-     '\uf7ee'
-     '\uf7ef'
-     '\uf7f0'
-     '\uf7f1'
-     '\uf7f2'
-     '\uf7f3'
-     '\uf7f4'
-     '\uf7f5'
-     '\uf7f6'
-     '\uf7f7'
-     '\uf7f8'
-     '\uf7f9'
-     '\uf7fa'
-     '\uf7fb'
-     '\uf7fc'
-     '\uf7fd'
-     '\uf7fe'
-     '\uf7ff'
- )
-
- ### Encoding table
- encoding_table = codecs.charmap_build(decoding_table)
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Autodog/nova/Dockerfile DELETED
@@ -1,9 +0,0 @@
- FROM node:18
- RUN git clone https://github.com/supercyx3/ChatGPT-Next-Web-LangChain.git
- WORKDIR "ChatGPT-Next-Web-LangChain"
-
- RUN yarn install && yarn build
- # Set environment variables
- #ENV BASE_URL=https://api.nova-oss.com
- EXPOSE 3000
- CMD yarn start
spaces/Banbri/zcvzcv/src/lib/loadImage.ts DELETED
@@ -1,14 +0,0 @@
- export async function loadImage(image: string): Promise<HTMLImageElement> {
-   const img = new Image();
-   img.src = image;
-
-   const imgOnLoad = () => {
-     return new Promise<HTMLImageElement>((resolve, reject) => {
-       img.onload = () => { resolve(img) };
-       img.onerror = (err) => { reject(err) };
-     })
-   };
-
-   const loadImg = await imgOnLoad();
-   return loadImg
- }
@@ -1,70 +0,0 @@
1
-
2
- <h1>¿Qué es Apklz y cómo usarlo? </h1>
3
- <p>Si usted es un usuario de Android, es posible que haya llegado a través del término "apklz" o visto archivos con la extensión . apklz. Pero ¿qué es exactamente apklz y cómo se puede utilizar en su dispositivo? En este artículo, le explicaremos todo lo que necesita saber sobre los archivos apklz, incluyendo sus características, beneficios, riesgos y precauciones. Al final de este artículo, podrás descargar, instalar, actualizar, desinstalar y administrar archivos apklz como un profesional. </p>
4
- <h2>Introducción</h2>
5
- <h3>¿Qué es apklz y qué significa? </h3>
6
- <p>Apklz es un formato de archivo que significa paquete Android Lempel-Ziv. Es una versión comprimida del formato de archivo estándar de Android Package (APK), que se utiliza para distribuir e instalar aplicaciones en dispositivos Android. Los archivos apklz se crean mediante un algoritmo de compresión sin pérdidas llamado Lempel-Ziv (LZ), que reduce el tamaño del archivo sin afectar la calidad o la funcionalidad de la aplicación. </p>
7
- <h2>apklz</h2><br /><p><b><b>Download File</b> <a href="https://bltlly.com/2v6Lys">https://bltlly.com/2v6Lys</a></b></p><br /><br />
8
- <h3>¿Por qué la gente usa archivos apklz y cuáles son los beneficios? </h3>
9
- <p>La gente usa archivos apklz por varias razones, como:</p>
10
- <ul>
11
- <li>Para ahorrar espacio de almacenamiento en sus dispositivos. Los archivos apklz suelen ser más pequeños que los archivos APK, lo que significa que ocupan menos espacio en la memoria del dispositivo. </li>
12
- <li>Para descargar aplicaciones más rápido. Los archivos apklz son más rápidos de descargar que los archivos APK, especialmente si tiene una conexión a Internet lenta o limitada. </li>
13
- <li>Para acceder a aplicaciones que no están disponibles en la tienda oficial de Google Play. Los archivos apklz le permiten instalar aplicaciones que están restringidas o eliminadas de Play Store debido a varias razones, como limitaciones regionales, problemas legales o violaciones de políticas. </li>
14
- <li>Para probar versiones nuevas o modificadas de aplicaciones. Los archivos de Apklz le permiten probar versiones beta, versiones modificadas o versiones personalizadas de aplicaciones que ofrecen características adicionales o mejoras que no están disponibles en las versiones originales. </li>
15
- </ul>
16
-
17
- <p>Para descargar e instalar archivos apklz en su dispositivo Android, debe seguir estos pasos:</p>
18
- <ol>
19
- <li>Encuentra una fuente confiable para descargar archivos apklz. Puede utilizar sitios web como <a href="( 1 )">Apk Plz</a>, <a href="( 2 )">Google Play</a>, o <a href="( 3 )">Scamvoid</a> para buscar y descargar archivos apklz de su elección. Asegúrese de revisar las calificaciones, reseñas y comentarios de otros usuarios antes de descargar cualquier archivo. </li>
20
- <li>Habilitar fuentes desconocidas en su dispositivo. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Play Store.</li>
21
- <li>Busque el archivo apklz descargado en su dispositivo. Puede usar una aplicación de administrador de archivos como ES File Explorer o File Manager para encontrar el archivo en su carpeta de descargas o en cualquier otra ubicación donde lo haya guardado. </li>
22
- <li>Toque en el archivo y siga las instrucciones en la pantalla para instalarlo. Es posible que necesite conceder algunos permisos o aceptar algunos términos y condiciones antes de que se complete la instalación. </li>
23
- </ol>
24
- <p>Felicidades, ha instalado con éxito un archivo apklz en su dispositivo. Ahora puede iniciar y usar la aplicación como lo haría normalmente. </p>
25
- <h2>Características de Apklz</h2>
26
- <h3>¿Cuáles son algunas de las características que hacen que los archivos apklz sean diferentes de otros formatos de archivo? </h3>
27
- <p>Los archivos apklz tienen algunas características únicas que los distinguen de otros formatos de archivo, como:</p>
28
- <ul>
29
- <li>Son autónomos y ejecutables. Los archivos Apklz contienen todos los componentes y recursos necesarios para ejecutar una aplicación, como código, imágenes, sonidos, fuentes, etc. No requieren archivos o bibliotecas adicionales para funcionar. </li>
30
- <li>Son compatibles y portátiles. Los archivos apklz pueden ejecutarse en cualquier dispositivo Android que admita el nivel mínimo de API y las especificaciones de hardware requeridas por la aplicación. No dependen del fabricante, modelo o versión del sistema operativo del dispositivo. </li>
31
-
32
- </ul>
33
- <h3>¿Cómo acceder y administrar archivos apklz en su dispositivo? </h3>
34
- <p>Para acceder y administrar archivos apklz en su dispositivo, debe usar una aplicación de administrador de archivos que admita la extensión . apklz. Algunas de las aplicaciones populares de administrador de archivos que pueden manejar archivos apklz son:</p>
35
- <tabla>
36
- <tr><th>Nombre de la aplicación</th><th>Descripción</th><th>Descargar enlace</th></tr>
37
- <tr><td>ES File Explorer</td><td>Una aplicación de administrador de archivos potente y versátil que puede acceder y gestionar todo tipo de archivos en su dispositivo, incluidos los archivos apklz. También tiene un administrador de aplicaciones incorporado que puede instalar, desinstalar, hacer copias de seguridad y restaurar archivos apklz. </td><td><a href="">ES File Explorer</a></td></tr>
38
- <tr><td>Administrador de archivos</td><td>Una aplicación de administrador de archivos sencilla y fácil de usar que puede acceder fácilmente y administrar archivos apklz en su dispositivo. También tiene una sección dedicada a los archivos apklz donde puede ver sus detalles, instalarlos o eliminarlos. </td><td><a href="">Administrador de archivos</a></td></tr>
39
- <tr><td>X-plore File Manager</td><td>Una aplicación de administrador de archivos de doble panel que puede acceder y administrar archivos apklz en su dispositivo. También tiene un administrador de aplicaciones incorporado que puede instalar, desinstalar, hacer copias de seguridad y restaurar archivos apklz. </td><td><a href="">X-plore File Manager</a></td></tr>
40
- </tabla>
41
- <h3>¿Cómo actualizar y desinstalar archivos apklz? </h3>
42
- <p>Para actualizar y desinstalar archivos apklz en su dispositivo, debe seguir estos pasos:</p>
43
- <p></p>
44
- <ol>
45
- <li>Para actualizar un archivo apklz, necesita descargar la última versión del archivo desde una fuente confiable e instalarlo sobre la versión existente. Puede utilizar los mismos pasos como se mencionó anteriormente para instalar un archivo apklz. Alternativamente, puede usar una aplicación de administrador de archivos que tenga un administrador de aplicaciones incorporado para buscar actualizaciones e instalarlas automáticamente. </li>
46
- <li>Para desinstalar un archivo apklz, es necesario ir a Configuración > Aplicaciones > ApkLZ (o el nombre de la aplicación) y toque en Desinstalar. También puede utilizar una aplicación de administrador de archivos que tiene un administrador de aplicaciones incorporado para desinstalar archivos apklz fácilmente. </li>
47
- </ol>
48
-
49
- <h3>¿Cuáles son algunos de los riesgos y desafíos de usar archivos apklz? </h3>
50
- <p>Si bien los archivos apklz tienen muchas ventajas, también vienen con algunos riesgos y desafíos que debe tener en cuenta, como:</p>
51
- <ul>
52
- <li>No pueden ser seguros. Los archivos apklz no son verificados o aprobados por Google o cualquier otra autoridad. Pueden contener malware, virus, spyware u otros elementos dañinos que pueden dañar su dispositivo o comprometer su privacidad. También pueden tener errores, errores o problemas de compatibilidad que pueden afectar el rendimiento o la funcionalidad de su dispositivo o aplicación. </li>
53
- <li>Pueden no ser legales o éticas. Los archivos de Apklz pueden violar los derechos de propiedad intelectual o los términos de servicio de los desarrolladores o editores originales de las aplicaciones. También pueden contener contenido pirateado, agrietado, hackeado o modificado que es ilegal o poco ético de usar. </li>
54
- <li>No pueden ser actualizados o soportados. Es posible que los archivos apklz no reciban </a> o <a href="">Titanium Backup</a> para respaldar y restaurar sus datos en caso de problemas o problemas con sus archivos apklz. </li>
55
- </ul>
56
- <h2>Conclusión</h2>
57
- <p>Los archivos de Apklz son una forma conveniente y eficiente de descargar e instalar aplicaciones en su dispositivo Android. Ofrecen muchos beneficios, como ahorrar espacio de almacenamiento, descargar más rápido, acceder a aplicaciones no disponibles y probar versiones nuevas o modificadas de aplicaciones. Sin embargo, también vienen con algunos riesgos y desafíos, como ser inseguro, ilegal o sin apoyo. Por lo tanto, debe tener cuidado y precaución al usar archivos apklz. Necesita descargarlos solo de fuentes confiables, escanearlos con software antivirus, leer sus permisos y términos de servicio, y hacer copias de seguridad de sus datos regularmente. Siguiendo estos consejos y precauciones, puedes disfrutar usando archivos apklz sin ninguna preocupación. </p>
58
-
59
- <h2>Preguntas frecuentes</h2>
60
- <h3>¿Cuál es la diferencia entre apklz y apk? </h3>
61
- <p>Apklz y apk son formatos de archivo que se utilizan para distribuir e instalar aplicaciones en dispositivos Android. La principal diferencia entre ellos es que los archivos apklz son versiones comprimidas de archivos apk, lo que significa que tienen tamaños de archivo más pequeños y velocidades de descarga más rápidas. Sin embargo, también tienen algunos inconvenientes, como ser menos seguro, menos legal y menos compatible que los archivos apk. </p>
62
- <h3>¿Cómo puedo abrir un archivo apklz en mi PC o Mac? </h3>
63
- <p>Para abrir un archivo apklz en su PC o Mac, es necesario utilizar un software que puede extraer o descomprimir el archivo. Algunos de los programas que pueden abrir archivos apklz son <a href="">7-Zip</a>, <a href="">WinRAR</a>, o <a href="">PeaZip</a>. Una vez que extraiga el archivo, obtendrá un archivo apk que puede abrir con un emulador de Android como <a href="">BlueStacks</a>, <a href="">NoxPlayer</a>, o <a href=">MEmu</a>. </p>
64
- <h3>¿Cómo puedo crear un archivo apklz desde un archivo apk? </h3>
65
- <p>Para crear un archivo apklz a partir de un archivo apk, es necesario utilizar un software que puede comprimir o convertir el archivo. Algunos de los programas que pueden crear archivos apklz son <a href="">ApkLZ Creator</a>, <a href="">ApkLZ Maker</a>, o <a href=">ApkLZ Converter</a>. También puede utilizar herramientas en línea como <a href="">Compresor de ApkLZ en línea</a> o <a href="">Generador de ApkLZ en línea</a>. </p>
66
- <h3>¿Cómo puedo editar o modificar un archivo apklz? </h3>
67
- <p>Para editar o modificar un archivo apklz, es necesario utilizar un software que puede editar o modificar el archivo. Algunos de los programas que pueden editar o modificar archivos apklz son <a href="">APK Editor</a>, <a href="">APK Tool</a>, o <a href=">APK Studio</a>. También puede utilizar herramientas en línea como <a href="">Editor de ApkLZ en línea</a> o <a href="">Modificador de ApkLZ en línea</a>. </p>
68
- <h3>¿Cómo puedo comprobar si un archivo apklz es seguro o no? </h3> 64aa2da5cf<br />
69
- <br />
70
- <br />
 
spaces/Benson/text-generation/Examples/Cazador Asesino Mod Apk Ilimitado Todo.md DELETED
@@ -1,81 +0,0 @@
-
- <h1>Hunter Assassin Mod APK: A Stealthy and Strategic Game for Android Users</h1>
- <p>If you are looking for a fun and challenging mobile game that tests your stealth and strategy skills, you may want to try Hunter Assassin. This is a popular game with more than 100 million downloads on the Google Play Store. But what if you want to enjoy the game with unlimited resources and features? That is where Hunter Assassin Mod APK comes in. In this article, we will tell you everything you need to know about Hunter Assassin and its modded version.</p>
- <h2>hunter assassin mod apk unlimited everything</h2><br /><p><b><b>Download File</b> ---> <a href="https://bltlly.com/2v6Khv">https://bltlly.com/2v6Khv</a></b></p><br /><br />
- <h2>What is Hunter Assassin?</h2>
- <p>Hunter Assassin is a mobile game developed by Ruby Game Studio. It is a fast-paced game in which you control an assassin armed with a deadly knife. Your mission is to eliminate all the targets in each level without being detected by the guards. You have to use your speed, agility, and cunning to surprise your enemies and defeat them one by one.</p>
- <h3>The gameplay of Hunter Assassin</h3>
- <p>The gameplay of Hunter Assassin is simple but addictive. You tap the screen to move your assassin and swipe to change direction. You have to avoid the guards' flashlight beams and hide behind walls, boxes, or barrels. You have to reach your targets and tap them to kill them. You can also collect gems and keys along the way, which can be used to unlock new assassins with different skills and abilities.</p>
- <h3>The features of Hunter Assassin</h3>
- <p>Hunter Assassin has many features that make it an enjoyable game for players of all ages. Some of these features are:</p>
- <ul>
- <li>More than 500 levels with increasing difficulty and variety.</li>
- <li>Different types of guards with different behaviors and weapons.</li>
- <li>Different types of assassins with different stats and appearances.</li>
- <li>Simple and intuitive controls that are easy to learn.</li>
- <li>Smooth, colorful graphics that create a vivid atmosphere.</li>
- </ul>
- <h2>What is Hunter Assassin Mod APK?</h2>
- <p>Hunter Assassin Mod APK is a modified version of the original game that gives you access to unlimited resources and features. With this modded version, you can enjoy the game without any limitations or restrictions. You can play with unlimited money, gems, keys, and diamonds. You can also unlock all the assassins and levels without spending real money. You can also remove the ads that might interrupt your gaming experience.</p>
- <p></p>
- <h3>The benefits of Hunter Assassin Mod APK</h3>
- <p>Hunter Assassin Mod APK has many benefits that make it a better option than the original game. Some of these benefits are:</p>
- <ul>
- <li>You can play the game without worrying about running out of resources or waiting for them to regenerate.</li>
- <li>You can unlock all the assassins and levels without completing any tasks or achievements.</li>
- <li>You can customize your assassin's appearance and abilities according to your preference.</li>
- <li>You can enjoy the game without any ads or pop-ups that might distract you or slow down your device.</li>
- <li>You can have more fun and challenge yourself with the unlimited resources and features.</li>
- </ul>
- <h3>The drawbacks of Hunter Assassin Mod APK</h3>
- <p>Hunter Assassin Mod APK also has some drawbacks you should be aware of before downloading and installing it. Some of these drawbacks are:</p>
- <ul>
- <li>You may face some compatibility issues or bugs on some devices or operating systems.</li>
- <li>You may lose your progress or data if you uninstall the modded version or update the original game.</li>
- <li>You may be banned or suspended from the game if you use the modded version online or in multiplayer mode.</li>
- <li>You may miss out on some updates or new features added to the original game by the developers.</li>
- <li>You may lose the thrill and satisfaction of playing the game fair and square.</li>
- </ul>
- <p>If you want to download and install Hunter Assassin Mod APK, you have to follow some simple steps and precautions. Here is a guide on how to do it:</p>
- <h3>The steps to download and install Hunter Assassin Mod APK</h3>
- <ol>
- <li>Go to a reliable and trustworthy website that offers the modded version of the game. You can search for it on Google or use the link below.</li>
- <li>Download the APK file of the modded version. Make sure you have enough storage space on your device.</li>
- <li>Go to your device settings and enable the option to install apps from unknown sources. This will allow you to install the modded version without any problems.</li>
- <li>Locate the downloaded APK file on your device and tap it to start the installation process.</li>
- <li>Follow the on-screen instructions and wait for the installation to finish.</li>
- <li>Launch the game and enjoy the unlimited resources and features.</li>
- </ol>
- <h3>The precautions to take before downloading and installing Hunter Assassin Mod APK</h3>
- <p>Before downloading and installing Hunter Assassin Mod APK, you should take some precautions to avoid any problems or risks. Here are some of them:</p>
- <ul>
- <li>Make sure you have a stable internet connection and good antivirus software on your device.</li>
- <li>Make sure you download the modded version from a reliable and trustworthy website. Avoid any suspicious or malicious links that could harm your device or data.</li>
- <li>Make sure you back up your data and progress from the original game. You can use a cloud service or an external storage device for this purpose.</li>
- <li>Make sure you uninstall the original game before installing the modded version. This will prevent any conflicts or errors between the two versions.</li>
- <li>Make sure you use the modded version offline or in single-player mode. Do not use it online or in multiplayer mode, as this may get you banned or suspended from the game.</li>
- </ul>
- <h2>Conclusion</h2>
-
- <p>Hunter Assassin Mod APK is a modified version of the original game that gives you access to unlimited resources and features. You can play with unlimited money, gems, keys, and diamonds. You can also unlock all the assassins and levels without spending real money. You can also remove the ads that might interrupt your gaming experience.</p>
- <p>If you want to download and install Hunter Assassin Mod APK, you have to follow some simple steps and precautions. You have to go to a reliable and trustworthy website that offers the modded version of the game. You have to download the APK file of the modded version and enable the option to install apps from unknown sources on your device. You have to locate the downloaded APK file on your device and tap it to start the installation process. You have to launch the game and enjoy the unlimited resources and features.</p>
- <p>However, you should also be aware of some drawbacks of Hunter Assassin Mod APK. You may face some compatibility issues or bugs on some devices or operating systems. You may lose your progress or data if you uninstall the modded version or update the original game. You may be banned or suspended from the game if you use the modded version online or in multiplayer mode. You may miss out on some updates or new features added to the original game by the developers. You may lose the thrill and satisfaction of playing the game fair and square.</p>
- <p>Therefore, you should weigh the pros and cons of Hunter Assassin Mod APK before deciding to download and install it. You should also follow the steps and precautions carefully to avoid any problems or risks. You should also respect the rules and policies of the game and play responsibly.</p>
- <h3>Frequently Asked Questions</h3>
- <p>Here are some frequently asked questions about Hunter Assassin and Hunter Assassin Mod APK:</p>
- <ol>
- <li> What is the latest version of Hunter Assassin Mod APK?</li>
-
- <li> Is it safe to download and install Hunter Assassin Mod APK?</li>
- <p>Hunter Assassin Mod APK is safe to download and install if you get it from a reliable and trustworthy website. However, you should always scan the APK file with good antivirus software before installing it. You should also back up your data and progress from the original game before installing the modded version.</p>
- <li> Can I play Hunter Assassin Mod APK offline?</li>
- <p>Yes, you can play Hunter Assassin Mod APK without an internet connection. However, you may not be able to access some features or updates that require an online connection.</p>
- <li> Can I play Hunter Assassin Mod APK with my friends?</li>
- <p>No, you cannot play Hunter Assassin Mod APK with your friends, as it does not have a multiplayer mode. It can only be played in single-player mode. If you want to play with your friends, you should use the original game.</p>
- <li>How can I contact the developers of Hunter Assassin?</li>
- <p>You can contact the developers of Hunter Assassin by sending them an email at [email protected]. You can also visit their website at https://www.rubygamestudio.com/ or follow them on Facebook at https://www.facebook.com/rubygamestudio.</p>
- </ol></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/BetterAPI/BetterChat_new/src/lib/types/SharedConversation.ts DELETED
@@ -1,11 +0,0 @@
- import type { Message } from "./Message";
- import type { Timestamps } from "./Timestamps";
-
- export interface SharedConversation extends Timestamps {
-   _id: string;
-
-   hash: string;
-
-   title: string;
-   messages: Message[];
- }
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/cache.py DELETED
@@ -1,222 +0,0 @@
1
- import os
2
- import textwrap
3
- from optparse import Values
4
- from typing import Any, List
5
-
6
- import pip._internal.utils.filesystem as filesystem
7
- from pip._internal.cli.base_command import Command
8
- from pip._internal.cli.status_codes import ERROR, SUCCESS
9
- from pip._internal.exceptions import CommandError, PipError
10
- from pip._internal.utils.logging import getLogger
11
-
12
- logger = getLogger(__name__)
13
-
14
-
15
- class CacheCommand(Command):
16
- """
17
- Inspect and manage pip's wheel cache.
18
-
19
- Subcommands:
20
-
21
- - dir: Show the cache directory.
22
- - info: Show information about the cache.
23
- - list: List filenames of packages stored in the cache.
24
- - remove: Remove one or more package from the cache.
25
- - purge: Remove all items from the cache.
26
-
27
- ``<pattern>`` can be a glob expression or a package name.
28
- """
29
-
30
- ignore_require_venv = True
31
- usage = """
32
- %prog dir
33
- %prog info
34
- %prog list [<pattern>] [--format=[human, abspath]]
35
- %prog remove <pattern>
36
- %prog purge
37
- """
38
-
39
- def add_options(self) -> None:
40
- self.cmd_opts.add_option(
41
- "--format",
42
- action="store",
43
- dest="list_format",
44
- default="human",
45
- choices=("human", "abspath"),
46
- help="Select the output format among: human (default) or abspath",
47
- )
48
-
49
- self.parser.insert_option_group(0, self.cmd_opts)
50
-
51
- def run(self, options: Values, args: List[str]) -> int:
52
- handlers = {
53
- "dir": self.get_cache_dir,
54
- "info": self.get_cache_info,
55
- "list": self.list_cache_items,
56
- "remove": self.remove_cache_items,
57
- "purge": self.purge_cache,
58
- }
59
-
60
- if not options.cache_dir:
61
- logger.error("pip cache commands can not function since cache is disabled.")
62
- return ERROR
63
-
64
- # Determine action
65
- if not args or args[0] not in handlers:
66
- logger.error(
67
- "Need an action (%s) to perform.",
68
- ", ".join(sorted(handlers)),
69
- )
70
- return ERROR
71
-
72
- action = args[0]
73
-
74
- # Error handling happens here, not in the action-handlers.
75
- try:
76
- handlers[action](options, args[1:])
77
- except PipError as e:
78
- logger.error(e.args[0])
79
- return ERROR
80
-
81
- return SUCCESS
82
-
83
- def get_cache_dir(self, options: Values, args: List[Any]) -> None:
84
- if args:
85
- raise CommandError("Too many arguments")
86
-
87
- logger.info(options.cache_dir)
88
-
89
- def get_cache_info(self, options: Values, args: List[Any]) -> None:
90
- if args:
91
- raise CommandError("Too many arguments")
92
-
93
- num_http_files = len(self._find_http_files(options))
94
- num_packages = len(self._find_wheels(options, "*"))
95
-
96
- http_cache_location = self._cache_dir(options, "http")
97
- wheels_cache_location = self._cache_dir(options, "wheels")
98
- http_cache_size = filesystem.format_directory_size(http_cache_location)
99
- wheels_cache_size = filesystem.format_directory_size(wheels_cache_location)
100
-
101
- message = (
102
- textwrap.dedent(
103
- """
104
- Package index page cache location: {http_cache_location}
105
- Package index page cache size: {http_cache_size}
106
-                 Number of HTTP files: {num_http_files}
-                 Locally built wheels location: {wheels_cache_location}
-                 Locally built wheels size: {wheels_cache_size}
-                 Number of locally built wheels: {package_count}
-                 """
-             )
-             .format(
-                 http_cache_location=http_cache_location,
-                 http_cache_size=http_cache_size,
-                 num_http_files=num_http_files,
-                 wheels_cache_location=wheels_cache_location,
-                 package_count=num_packages,
-                 wheels_cache_size=wheels_cache_size,
-             )
-             .strip()
-         )
-
-         logger.info(message)
-
-     def list_cache_items(self, options: Values, args: List[Any]) -> None:
-         if len(args) > 1:
-             raise CommandError("Too many arguments")
-
-         if args:
-             pattern = args[0]
-         else:
-             pattern = "*"
-
-         files = self._find_wheels(options, pattern)
-         if options.list_format == "human":
-             self.format_for_human(files)
-         else:
-             self.format_for_abspath(files)
-
-     def format_for_human(self, files: List[str]) -> None:
-         if not files:
-             logger.info("No locally built wheels cached.")
-             return
-
-         results = []
-         for filename in files:
-             wheel = os.path.basename(filename)
-             size = filesystem.format_file_size(filename)
-             results.append(f" - {wheel} ({size})")
-         logger.info("Cache contents:\n")
-         logger.info("\n".join(sorted(results)))
-
-     def format_for_abspath(self, files: List[str]) -> None:
-         if not files:
-             return
-
-         results = []
-         for filename in files:
-             results.append(filename)
-
-         logger.info("\n".join(sorted(results)))
-
-     def remove_cache_items(self, options: Values, args: List[Any]) -> None:
-         if len(args) > 1:
-             raise CommandError("Too many arguments")
-
-         if not args:
-             raise CommandError("Please provide a pattern")
-
-         files = self._find_wheels(options, args[0])
-
-         no_matching_msg = "No matching packages"
-         if args[0] == "*":
-             # Only fetch http files if no specific pattern given
-             files += self._find_http_files(options)
-         else:
-             # Add the pattern to the log message
-             no_matching_msg += ' for pattern "{}"'.format(args[0])
-
-         if not files:
-             logger.warning(no_matching_msg)
-
-         for filename in files:
-             os.unlink(filename)
-             logger.verbose("Removed %s", filename)
-         logger.info("Files removed: %s", len(files))
-
-     def purge_cache(self, options: Values, args: List[Any]) -> None:
-         if args:
-             raise CommandError("Too many arguments")
-
-         return self.remove_cache_items(options, ["*"])
-
-     def _cache_dir(self, options: Values, subdir: str) -> str:
-         return os.path.join(options.cache_dir, subdir)
-
-     def _find_http_files(self, options: Values) -> List[str]:
-         http_dir = self._cache_dir(options, "http")
-         return filesystem.find_files(http_dir, "*")
-
-     def _find_wheels(self, options: Values, pattern: str) -> List[str]:
-         wheel_dir = self._cache_dir(options, "wheels")
-
-         # The wheel filename format, as specified in PEP 427, is:
-         #     {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
-         #
-         # Additionally, non-alphanumeric values in the distribution are
-         # normalized to underscores (_), meaning hyphens can never occur
-         # before `-{version}`.
-         #
-         # Given that information:
-         # - If the pattern we're given contains a hyphen (-), the user is
-         #   providing at least the version. Thus, we can just append `*.whl`
-         #   to match the rest of it.
-         # - If the pattern we're given doesn't contain a hyphen (-), the
-         #   user is only providing the name. Thus, we append `-*.whl` to
-         #   match the hyphen before the version, followed by anything else.
-         #
-         # PEP 427: https://www.python.org/dev/peps/pep-0427/
-         pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
-
-         return filesystem.find_files(wheel_dir, pattern)
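
The pattern expansion at the end of _find_wheels is easy to sanity-check in isolation. A minimal standalone sketch (the wheel filenames are made up for illustration; only fnmatch from the standard library is used):

import fnmatch

def expand(pattern: str) -> str:
    # Mirrors _find_wheels: a hyphen means the user already typed a version.
    return pattern + ("*.whl" if "-" in pattern else "-*.whl")

wheels = ["requests-2.31.0-py3-none-any.whl", "rich-13.4.2-py3-none-any.whl"]
print(fnmatch.filter(wheels, expand("requests")))       # name only -> "requests-*.whl"
print(fnmatch.filter(wheels, expand("requests-2.31")))  # name+version -> "requests-2.31*.whl"
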
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_boxes.py DELETED
@@ -1,176 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import json
- import math
- import numpy as np
- import unittest
- import torch
-
- from detectron2.structures import Boxes, BoxMode, pairwise_iou
-
-
- class TestBoxMode(unittest.TestCase):
-     def _convert_xy_to_wh(self, x):
-         return BoxMode.convert(x, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
-
-     def _convert_xywha_to_xyxy(self, x):
-         return BoxMode.convert(x, BoxMode.XYWHA_ABS, BoxMode.XYXY_ABS)
-
-     def _convert_xywh_to_xywha(self, x):
-         return BoxMode.convert(x, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
-
-     def test_box_convert_list(self):
-         for tp in [list, tuple]:
-             box = tp([5.0, 5.0, 10.0, 10.0])
-             output = self._convert_xy_to_wh(box)
-             self.assertIsInstance(output, tp)
-             self.assertIsInstance(output[0], float)
-             self.assertEqual(output, tp([5.0, 5.0, 5.0, 5.0]))
-
-             with self.assertRaises(Exception):
-                 self._convert_xy_to_wh([box])
-
-     def test_box_convert_array(self):
-         box = np.asarray([[5, 5, 10, 10], [1, 1, 2, 3]])
-         output = self._convert_xy_to_wh(box)
-         self.assertEqual(output.dtype, box.dtype)
-         self.assertEqual(output.shape, box.shape)
-         self.assertTrue((output[0] == [5, 5, 5, 5]).all())
-         self.assertTrue((output[1] == [1, 1, 1, 2]).all())
-
-     def test_box_convert_cpu_tensor(self):
-         box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
-         output = self._convert_xy_to_wh(box)
-         self.assertEqual(output.dtype, box.dtype)
-         self.assertEqual(output.shape, box.shape)
-         output = output.numpy()
-         self.assertTrue((output[0] == [5, 5, 5, 5]).all())
-         self.assertTrue((output[1] == [1, 1, 1, 2]).all())
-
-     @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
-     def test_box_convert_cuda_tensor(self):
-         box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]).cuda()
-         output = self._convert_xy_to_wh(box)
-         self.assertEqual(output.dtype, box.dtype)
-         self.assertEqual(output.shape, box.shape)
-         self.assertEqual(output.device, box.device)
-         output = output.cpu().numpy()
-         self.assertTrue((output[0] == [5, 5, 5, 5]).all())
-         self.assertTrue((output[1] == [1, 1, 1, 2]).all())
-
-     def test_box_convert_xywha_to_xyxy_list(self):
-         for tp in [list, tuple]:
-             box = tp([50, 50, 30, 20, 0])
-             output = self._convert_xywha_to_xyxy(box)
-             self.assertIsInstance(output, tp)
-             self.assertEqual(output, tp([35, 40, 65, 60]))
-
-             with self.assertRaises(Exception):
-                 self._convert_xywha_to_xyxy([box])
-
-     def test_box_convert_xywha_to_xyxy_array(self):
-         for dtype in [np.float64, np.float32]:
-             box = np.asarray(
-                 [
-                     [50, 50, 30, 20, 0],
-                     [50, 50, 30, 20, 90],
-                     [1, 1, math.sqrt(2), math.sqrt(2), -45],
-                 ],
-                 dtype=dtype,
-             )
-             output = self._convert_xywha_to_xyxy(box)
-             self.assertEqual(output.dtype, box.dtype)
-             expected = np.asarray([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
-             self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output))
-
-     def test_box_convert_xywha_to_xyxy_tensor(self):
-         for dtype in [torch.float32, torch.float64]:
-             box = torch.tensor(
-                 [
-                     [50, 50, 30, 20, 0],
-                     [50, 50, 30, 20, 90],
-                     [1, 1, math.sqrt(2), math.sqrt(2), -45],
-                 ],
-                 dtype=dtype,
-             )
-             output = self._convert_xywha_to_xyxy(box)
-             self.assertEqual(output.dtype, box.dtype)
-             expected = torch.tensor([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
-
-             self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output))
-
-     def test_box_convert_xywh_to_xywha_list(self):
-         for tp in [list, tuple]:
-             box = tp([50, 50, 30, 20])
-             output = self._convert_xywh_to_xywha(box)
-             self.assertIsInstance(output, tp)
-             self.assertEqual(output, tp([65, 60, 30, 20, 0]))
-
-             with self.assertRaises(Exception):
-                 self._convert_xywh_to_xywha([box])
-
-     def test_box_convert_xywh_to_xywha_array(self):
-         for dtype in [np.float64, np.float32]:
-             box = np.asarray([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype)
-             output = self._convert_xywh_to_xywha(box)
-             self.assertEqual(output.dtype, box.dtype)
-             expected = np.asarray(
-                 [[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype
-             )
-             self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output))
-
-     def test_box_convert_xywh_to_xywha_tensor(self):
-         for dtype in [torch.float32, torch.float64]:
-             box = torch.tensor([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype)
-             output = self._convert_xywh_to_xywha(box)
-             self.assertEqual(output.dtype, box.dtype)
-             expected = torch.tensor(
-                 [[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype
-             )
-
-             self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output))
-
-     def test_json_serializable(self):
-         payload = {"box_mode": BoxMode.XYWH_REL}
-         try:
-             json.dumps(payload)
-         except Exception:
-             self.fail("JSON serialization failed")
-
-     def test_json_deserializable(self):
-         payload = '{"box_mode": 2}'
-         obj = json.loads(payload)
-         try:
-             obj["box_mode"] = BoxMode(obj["box_mode"])
-         except Exception:
-             self.fail("JSON deserialization failed")
-
-
- class TestBoxIOU(unittest.TestCase):
-     def test_pairwise_iou(self):
-         boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
-
-         boxes2 = torch.tensor(
-             [
-                 [0.0, 0.0, 1.0, 1.0],
-                 [0.0, 0.0, 0.5, 1.0],
-                 [0.0, 0.0, 1.0, 0.5],
-                 [0.0, 0.0, 0.5, 0.5],
-                 [0.5, 0.5, 1.0, 1.0],
-                 [0.5, 0.5, 1.5, 1.5],
-             ]
-         )
-
-         expected_ious = torch.tensor(
-             [
-                 [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
-                 [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
-             ]
-         )
-
-         ious = pairwise_iou(Boxes(boxes1), Boxes(boxes2))
-
-         self.assertTrue(torch.allclose(ious, expected_ious))
-
-
- if __name__ == "__main__":
-     unittest.main()
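
The last expected value in test_pairwise_iou is the only non-obvious one, and it is worth spelling out. A quick check in plain Python, independent of detectron2:

# IoU of [0, 0, 1, 1] and [0.5, 0.5, 1.5, 1.5]: both boxes have area 1 and
# overlap on a 0.5 x 0.5 square.
inter = 0.5 * 0.5        # 0.25
union = 1 + 1 - inter    # 1.75
print(inter / union)     # 0.1428... == 0.25 / (2 - 0.25), as asserted above
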
spaces/ChevyWithAI/rvc-aicover/config.py DELETED
@@ -1,88 +0,0 @@
- ######################## Hardware settings ########################
-
- # Set to cuda:x, cpu, or mps; x is the GPU index. Only NVIDIA GPUs / Apple Silicon are accelerated.
- device = "cuda:0"
-
- # Safe to leave True on 9/10/20/30/40-series GPUs; quality is unaffected, and 20-series or newer cards get a speedup.
- is_half = True
-
- # 0 (default) uses all threads; set a number to cap CPU usage.
- n_cpu = 0
-
- ######################## Hardware settings ########################
-
-
- ############ Parameter handling logic below - do not edit ############
-
- ######################## Command-line arguments ########################
- import argparse
-
- parser = argparse.ArgumentParser()
- parser.add_argument("--port", type=int, default=7865, help="Listen port")
- parser.add_argument("--pycmd", type=str, default="python", help="Python command")
- parser.add_argument("--colab", action="store_true", help="Launch in colab")
- parser.add_argument(
-     "--noparallel", action="store_true", help="Disable parallel processing"
- )
- parser.add_argument(
-     "--noautoopen", action="store_true", help="Do not open in browser automatically"
- )
- cmd_opts, unknown = parser.parse_known_args()
-
- python_cmd = cmd_opts.pycmd
- listen_port = cmd_opts.port
- iscolab = cmd_opts.colab
- noparallel = cmd_opts.noparallel
- noautoopen = cmd_opts.noautoopen
- ######################## Command-line arguments ########################
-
- import sys
- import torch
-
-
- # has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
- # check `getattr` and try it for compatibility
- def has_mps() -> bool:
-     if sys.platform != "darwin":
-         return False
-     else:
-         if not getattr(torch, "has_mps", False):
-             return False
-         try:
-             torch.zeros(1).to(torch.device("mps"))
-             return True
-         except Exception:
-             return False
-
-
- if not torch.cuda.is_available():
-     if has_mps():
-         print("No supported NVIDIA GPU found; using MPS for inference")
-         device = "mps"
-     else:
-         print("No supported NVIDIA GPU found; using CPU for inference")
-         device = "cpu"
-         is_half = False
-
- if device not in ["cpu", "mps"]:
-     gpu_name = torch.cuda.get_device_name(int(device.split(":")[-1]))
-     if "16" in gpu_name or "MX" in gpu_name:
-         print("16-series / MX-series GPUs are forced to single precision")
-         is_half = False
-
- from multiprocessing import cpu_count
-
- if n_cpu == 0:
-     n_cpu = cpu_count()
- if is_half:
-     # settings for 6 GB of VRAM
-     x_pad = 3
-     x_query = 10
-     x_center = 60
-     x_max = 65
- else:
-     # settings for 5 GB of VRAM
-     x_pad = 1
-     x_query = 6
-     x_center = 38
-     x_max = 41
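
On current PyTorch the getattr probe in has_mps() is no longer needed; since PyTorch 1.12 there is a stable API for this check. A hedged sketch of the equivalent (assuming PyTorch >= 1.12; not what this file ships):

import torch

def has_mps() -> bool:
    # torch.backends.mps.is_available() is the supported check on 1.12+.
    mps = getattr(torch.backends, "mps", None)
    return mps is not None and mps.is_available()
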
spaces/CikeyQI/meme-api/meme_generator/memes/maimai_join/__init__.py DELETED
@@ -1,22 +0,0 @@
- from pathlib import Path
- from typing import List
-
- from pil_utils import BuildImage
-
- from meme_generator import add_meme
- from meme_generator.utils import make_jpg_or_gif
-
- img_dir = Path(__file__).parent / "images"
-
-
- def maimai_join(images: List[BuildImage], texts, args):
-     frame = BuildImage.open(img_dir / "0.png")
-
-     def make(img: BuildImage) -> BuildImage:
-         img = img.convert("RGBA").square().resize((400, 400))
-         return frame.copy().paste(img, (50, 50), alpha=True, below=True)
-
-     return make_jpg_or_gif(images[0], make)
-
-
- add_meme("maimai_join", maimai_join, min_images=1, max_images=1, keywords=["旅行伙伴加入"])
spaces/CjangCjengh/Sanskrit-TTS/attentions.py DELETED
@@ -1,300 +0,0 @@
- import math
- import torch
- from torch import nn
- from torch.nn import functional as F
-
- import commons
- from modules import LayerNorm
-
-
- class Encoder(nn.Module):
-     def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
-         super().__init__()
-         self.hidden_channels = hidden_channels
-         self.filter_channels = filter_channels
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.window_size = window_size
-
-         self.drop = nn.Dropout(p_dropout)
-         self.attn_layers = nn.ModuleList()
-         self.norm_layers_1 = nn.ModuleList()
-         self.ffn_layers = nn.ModuleList()
-         self.norm_layers_2 = nn.ModuleList()
-         for i in range(self.n_layers):
-             self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
-             self.norm_layers_1.append(LayerNorm(hidden_channels))
-             self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
-             self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-     def forward(self, x, x_mask):
-         attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-         x = x * x_mask
-         for i in range(self.n_layers):
-             y = self.attn_layers[i](x, x, attn_mask)
-             y = self.drop(y)
-             x = self.norm_layers_1[i](x + y)
-
-             y = self.ffn_layers[i](x, x_mask)
-             y = self.drop(y)
-             x = self.norm_layers_2[i](x + y)
-         x = x * x_mask
-         return x
-
-
- class Decoder(nn.Module):
-     def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
-         super().__init__()
-         self.hidden_channels = hidden_channels
-         self.filter_channels = filter_channels
-         self.n_heads = n_heads
-         self.n_layers = n_layers
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.proximal_bias = proximal_bias
-         self.proximal_init = proximal_init
-
-         self.drop = nn.Dropout(p_dropout)
-         self.self_attn_layers = nn.ModuleList()
-         self.norm_layers_0 = nn.ModuleList()
-         self.encdec_attn_layers = nn.ModuleList()
-         self.norm_layers_1 = nn.ModuleList()
-         self.ffn_layers = nn.ModuleList()
-         self.norm_layers_2 = nn.ModuleList()
-         for i in range(self.n_layers):
-             self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
-             self.norm_layers_0.append(LayerNorm(hidden_channels))
-             self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
-             self.norm_layers_1.append(LayerNorm(hidden_channels))
-             self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
-             self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-     def forward(self, x, x_mask, h, h_mask):
-         """
-         x: decoder input
-         h: encoder output
-         """
-         self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
-         encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-         x = x * x_mask
-         for i in range(self.n_layers):
-             y = self.self_attn_layers[i](x, x, self_attn_mask)
-             y = self.drop(y)
-             x = self.norm_layers_0[i](x + y)
-
-             y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
-             y = self.drop(y)
-             x = self.norm_layers_1[i](x + y)
-
-             y = self.ffn_layers[i](x, x_mask)
-             y = self.drop(y)
-             x = self.norm_layers_2[i](x + y)
-         x = x * x_mask
-         return x
-
-
- class MultiHeadAttention(nn.Module):
-     def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
-         super().__init__()
-         assert channels % n_heads == 0
-
-         self.channels = channels
-         self.out_channels = out_channels
-         self.n_heads = n_heads
-         self.p_dropout = p_dropout
-         self.window_size = window_size
-         self.heads_share = heads_share
-         self.block_length = block_length
-         self.proximal_bias = proximal_bias
-         self.proximal_init = proximal_init
-         self.attn = None
-
-         self.k_channels = channels // n_heads
-         self.conv_q = nn.Conv1d(channels, channels, 1)
-         self.conv_k = nn.Conv1d(channels, channels, 1)
-         self.conv_v = nn.Conv1d(channels, channels, 1)
-         self.conv_o = nn.Conv1d(channels, out_channels, 1)
-         self.drop = nn.Dropout(p_dropout)
-
-         if window_size is not None:
-             n_heads_rel = 1 if heads_share else n_heads
-             rel_stddev = self.k_channels**-0.5
-             self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-             self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
-         nn.init.xavier_uniform_(self.conv_q.weight)
-         nn.init.xavier_uniform_(self.conv_k.weight)
-         nn.init.xavier_uniform_(self.conv_v.weight)
-         if proximal_init:
-             with torch.no_grad():
-                 self.conv_k.weight.copy_(self.conv_q.weight)
-                 self.conv_k.bias.copy_(self.conv_q.bias)
-
-     def forward(self, x, c, attn_mask=None):
-         q = self.conv_q(x)
-         k = self.conv_k(c)
-         v = self.conv_v(c)
-
-         x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
-         x = self.conv_o(x)
-         return x
-
-     def attention(self, query, key, value, mask=None):
-         # reshape [b, d, t] -> [b, n_h, t, d_k]
-         b, d, t_s, t_t = (*key.size(), query.size(2))
-         query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
-         key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-         value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
-         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
-         if self.window_size is not None:
-             assert t_s == t_t, "Relative attention is only available for self-attention."
-             key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
-             rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
-             scores_local = self._relative_position_to_absolute_position(rel_logits)
-             scores = scores + scores_local
-         if self.proximal_bias:
-             assert t_s == t_t, "Proximal bias is only available for self-attention."
-             scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
-         if mask is not None:
-             scores = scores.masked_fill(mask == 0, -1e4)
-             if self.block_length is not None:
-                 assert t_s == t_t, "Local attention is only available for self-attention."
-                 block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
-                 scores = scores.masked_fill(block_mask == 0, -1e4)
-         p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
-         p_attn = self.drop(p_attn)
-         output = torch.matmul(p_attn, value)
-         if self.window_size is not None:
-             relative_weights = self._absolute_position_to_relative_position(p_attn)
-             value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
-             output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
-         output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
-         return output, p_attn
-
-     def _matmul_with_relative_values(self, x, y):
-         """
-         x: [b, h, l, m]
-         y: [h or 1, m, d]
-         ret: [b, h, l, d]
-         """
-         ret = torch.matmul(x, y.unsqueeze(0))
-         return ret
-
-     def _matmul_with_relative_keys(self, x, y):
-         """
-         x: [b, h, l, d]
-         y: [h or 1, m, d]
-         ret: [b, h, l, m]
-         """
-         ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
-         return ret
-
-     def _get_relative_embeddings(self, relative_embeddings, length):
-         max_relative_position = 2 * self.window_size + 1
-         # Pad first before slice to avoid using cond ops.
-         pad_length = max(length - (self.window_size + 1), 0)
-         slice_start_position = max((self.window_size + 1) - length, 0)
-         slice_end_position = slice_start_position + 2 * length - 1
-         if pad_length > 0:
-             padded_relative_embeddings = F.pad(
-                 relative_embeddings,
-                 commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
-         else:
-             padded_relative_embeddings = relative_embeddings
-         used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
-         return used_relative_embeddings
-
-     def _relative_position_to_absolute_position(self, x):
-         """
-         x: [b, h, l, 2*l-1]
-         ret: [b, h, l, l]
-         """
-         batch, heads, length, _ = x.size()
-         # Concat columns of pad to shift from relative to absolute indexing.
-         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
-         # Concat extra elements so as to add up to shape (len+1, 2*len-1).
-         x_flat = x.view([batch, heads, length * 2 * length])
-         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
-
-         # Reshape and slice out the padded elements.
-         x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
-         return x_final
-
-     def _absolute_position_to_relative_position(self, x):
-         """
-         x: [b, h, l, l]
-         ret: [b, h, l, 2*l-1]
-         """
-         batch, heads, length, _ = x.size()
-         # pad along column
-         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
-         x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
-         # add zeros at the beginning that will skew the elements after reshape
-         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
-         x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
-         return x_final
-
-     def _attention_bias_proximal(self, length):
-         """Bias for self-attention to encourage attention to close positions.
-         Args:
-           length: an integer scalar.
-         Returns:
-           a Tensor with shape [1, 1, length, length]
-         """
-         r = torch.arange(length, dtype=torch.float32)
-         diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
-         return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
- class FFN(nn.Module):
-     def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
-         super().__init__()
-         self.in_channels = in_channels
-         self.out_channels = out_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.p_dropout = p_dropout
-         self.activation = activation
-         self.causal = causal
-
-         if causal:
-             self.padding = self._causal_padding
-         else:
-             self.padding = self._same_padding
-
-         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
-         self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
-         self.drop = nn.Dropout(p_dropout)
-
-     def forward(self, x, x_mask):
-         x = self.conv_1(self.padding(x * x_mask))
-         if self.activation == "gelu":
-             x = x * torch.sigmoid(1.702 * x)
-         else:
-             x = torch.relu(x)
-         x = self.drop(x)
-         x = self.conv_2(self.padding(x * x_mask))
-         return x * x_mask
-
-     def _causal_padding(self, x):
-         if self.kernel_size == 1:
-             return x
-         pad_l = self.kernel_size - 1
-         pad_r = 0
-         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-         x = F.pad(x, commons.convert_pad_shape(padding))
-         return x
-
-     def _same_padding(self, x):
-         if self.kernel_size == 1:
-             return x
-         pad_l = (self.kernel_size - 1) // 2
-         pad_r = self.kernel_size // 2
-         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-         x = F.pad(x, commons.convert_pad_shape(padding))
-         return x
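
The two position-conversion helpers above are pure padding-and-reshape tricks, so their effect is easiest to verify on shapes alone. A standalone sketch of _relative_position_to_absolute_position (sizes are arbitrary; plain F.pad stands in for the commons.convert_pad_shape helper):

import torch
import torch.nn.functional as F

b, h, l = 2, 4, 5
rel = torch.randn(b, h, l, 2 * l - 1)      # per-query scores over relative offsets
x = F.pad(rel, (0, 1))                     # [b, h, l, 2l]
x = x.view(b, h, l * 2 * l)                # flatten
x = F.pad(x, (0, l - 1))                   # pad so the skewed reshape lines up
out = x.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]
print(out.shape)                           # torch.Size([2, 4, 5, 5])
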
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/fpn.py DELETED
@@ -1,98 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
- import torch
- import torch.nn.functional as F
- from torch import nn
-
-
- class FPN(nn.Module):
-     """
-     Module that adds FPN on top of a list of feature maps.
-     The feature maps are currently supposed to be in increasing depth order, and must be consecutive.
-     """
-
-     def __init__(
-         self, in_channels_list, out_channels, conv_block, top_blocks=None
-     ):
-         """
-         Arguments:
-             in_channels_list (list[int]): number of channels for each feature map that
-                 will be fed
-             out_channels (int): number of channels of the FPN representation
-             top_blocks (nn.Module or None): if provided, an extra operation will
-                 be performed on the output of the last (smallest resolution)
-                 FPN output, and the result will extend the result list
-         """
-         super(FPN, self).__init__()
-         self.inner_blocks = []
-         self.layer_blocks = []
-         for idx, in_channels in enumerate(in_channels_list, 1):
-             inner_block = "fpn_inner{}".format(idx)
-             layer_block = "fpn_layer{}".format(idx)
-
-             if in_channels == 0:
-                 continue
-             inner_block_module = conv_block(in_channels, out_channels, 1)
-             layer_block_module = conv_block(out_channels, out_channels, 3, 1)
-             self.add_module(inner_block, inner_block_module)
-             self.add_module(layer_block, layer_block_module)
-             self.inner_blocks.append(inner_block)
-             self.layer_blocks.append(layer_block)
-         self.top_blocks = top_blocks
-
-     def forward(self, x):
-         """
-         Arguments:
-             x (list[Tensor]): feature maps for each feature level.
-         Returns:
-             results (tuple[Tensor]): feature maps after FPN layers.
-                 They are ordered from highest resolution first.
-         """
-         last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
-         results = []
-         results.append(getattr(self, self.layer_blocks[-1])(last_inner))
-         for feature, inner_block, layer_block in zip(
-             x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]
-         ):
-             if not inner_block:
-                 continue
-             inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest")
-             inner_lateral = getattr(self, inner_block)(feature)
-             # TODO use size instead of scale to make it robust to different sizes
-             # inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:],
-             #     mode='bilinear', align_corners=False)
-             last_inner = inner_lateral + inner_top_down
-             results.insert(0, getattr(self, layer_block)(last_inner))
-
-         if isinstance(self.top_blocks, LastLevelP6P7):
-             last_results = self.top_blocks(x[-1], results[-1])
-             results.extend(last_results)
-         elif isinstance(self.top_blocks, LastLevelMaxPool):
-             last_results = self.top_blocks(results[-1])
-             results.extend(last_results)
-
-         return tuple(results)
-
-
- class LastLevelMaxPool(nn.Module):
-     def forward(self, x):
-         return [F.max_pool2d(x, 1, 2, 0)]
-
-
- class LastLevelP6P7(nn.Module):
-     """
-     This module is used in RetinaNet to generate extra layers, P6 and P7.
-     """
-     def __init__(self, in_channels, out_channels):
-         super(LastLevelP6P7, self).__init__()
-         self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
-         self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
-         for module in [self.p6, self.p7]:
-             nn.init.kaiming_uniform_(module.weight, a=1)
-             nn.init.constant_(module.bias, 0)
-         self.use_P5 = in_channels == out_channels
-
-     def forward(self, c5, p5):
-         x = p5 if self.use_P5 else c5
-         p6 = self.p6(x)
-         p7 = self.p7(F.relu(p6))
-         return [p6, p7]
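
A quick usage sketch for the module above. Here conv_block is a plain stand-in for maskrcnn_benchmark's conv_with_kaiming_uniform factory, and the channel/size choices are typical ResNet-50 numbers, not anything this repo pins down; the sketch assumes FPN and LastLevelMaxPool from the file above are in scope:

import torch
from torch import nn

def conv_block(in_ch, out_ch, kernel, stride=1):
    # stand-in for conv_with_kaiming_uniform(...)
    return nn.Conv2d(in_ch, out_ch, kernel, stride, padding=kernel // 2)

fpn = FPN(in_channels_list=[256, 512, 1024, 2048], out_channels=256,
          conv_block=conv_block, top_blocks=LastLevelMaxPool())
feats = [torch.randn(1, c, s, s) for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])]
outs = fpn(feats)
print([tuple(o.shape) for o in outs])
# [(1, 256, 64, 64), (1, 256, 32, 32), (1, 256, 16, 16), (1, 256, 8, 8), (1, 256, 4, 4)]
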
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-9af10d66.js DELETED
@@ -1,2 +0,0 @@
- import{S as o,e as s,s as a}from"./index-1d65707a.js";class n extends o{constructor(e){super(),s(this,e,null,null,a,{})}}const c=n,p=["static"],d=t=>({type:{payload:"Any"},description:{payload:"stored state value"},example_data:""});export{c as Component,d as document,p as modes};
- //# sourceMappingURL=index-9af10d66.js.map
spaces/Detomo/Object_detection/app.py DELETED
@@ -1,58 +0,0 @@
- import gradio as gr
- import torch
- from sahi.prediction import ObjectPrediction
- from sahi.utils.cv import visualize_object_predictions, read_image
- from ultralyticsplus import YOLO, render_result
-
-
- def yolov8_inference(
-     image,
-     model_path,
-     image_size,
-     conf_threshold,
-     iou_threshold,
- ):
-     """
-     YOLOv8 inference function
-     Args:
-         image: Input image
-         model_path: Path to the model
-         image_size: Image size
-         conf_threshold: Confidence threshold
-         iou_threshold: IOU threshold
-     Returns:
-         Rendered image
-     """
-     model = YOLO(f'kadirnar/{model_path}-v8.0')
-     # set model parameters
-     model.overrides['conf'] = conf_threshold  # NMS confidence threshold
-     model.overrides['iou'] = iou_threshold  # NMS IoU threshold
-     model.overrides['agnostic_nms'] = False  # NMS class-agnostic
-     model.overrides['max_det'] = 1000  # maximum number of detections per image
-     results = model.predict(image, imgsz=image_size)
-     render = render_result(model=model, image=image, result=results[0])
-     return render
-
-
- inputs = [
-     gr.Image(type="filepath", label="Input Image"),
-     gr.Dropdown(["yolov8n", "yolov8m", "yolov8l", "yolov8x"],
-                 default="yolov8m", label="Model"),
-     gr.Slider(minimum=320, maximum=1280, default=640, step=320, label="Image Size"),
-     gr.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
-     gr.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
- ]
-
- outputs = gr.Image(type="filepath", label="Output Image")
- title = "State-of-the-Art YOLO Models for Object detection"
-
- examples = [['demo_01.jpg', 'yolov8n', 640, 0.25, 0.45], ['demo_02.jpg', 'yolov8l', 640, 0.25, 0.45], ['demo_03.jpg', 'yolov8x', 1280, 0.25, 0.45]]
- demo_app = gr.Interface(
-     fn=yolov8_inference,
-     inputs=inputs,
-     outputs=outputs,
-     title=title,
-     examples=examples,
-     cache_examples=True,
- )
- demo_app.launch(debug=True)
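
One caveat if this app is run against a current Gradio install: the default= keyword on Dropdown and Slider comes from a very old release; Gradio 3+ spells it value=. A hedged sketch of the updated inputs (same values, only the keyword changes):

inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Dropdown(["yolov8n", "yolov8m", "yolov8l", "yolov8x"], value="yolov8m", label="Model"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=320, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
]
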
spaces/Detomo/ai-comic-generation/src/lib/generateSeed.ts DELETED
@@ -1,3 +0,0 @@
- export function generateSeed() {
-   return Math.floor(Math.random() * Math.pow(2, 31));
- }
spaces/DragGan/DragGan-Inversion/PTI/criteria/localitly_regulizer.py DELETED
@@ -1,65 +0,0 @@
- import torch
- import numpy as np  # note: wandb is used below under use_wandb but is never imported in this file
- from PTI.criteria import l2_loss
- from PTI.configs import hyperparameters
- from PTI.configs import global_config
-
-
- class Space_Regulizer:
-     def __init__(self, original_G, lpips_net):
-         self.original_G = original_G
-         self.morphing_regulizer_alpha = hyperparameters.regulizer_alpha
-         self.lpips_loss = lpips_net
-
-     def get_morphed_w_code(self, new_w_code, fixed_w):
-         interpolation_direction = new_w_code - fixed_w
-         interpolation_direction_norm = torch.norm(interpolation_direction, p=2)
-         direction_to_move = hyperparameters.regulizer_alpha * \
-             interpolation_direction / interpolation_direction_norm
-         result_w = fixed_w + direction_to_move
-         self.morphing_regulizer_alpha * fixed_w + \
-             (1 - self.morphing_regulizer_alpha) * new_w_code  # note: dead expression, never assigned or returned
-
-         return result_w
-
-     def get_image_from_ws(self, w_codes, G):
-         return torch.cat([G.synthesis(w_code, noise_mode='none', force_fp32=True) for w_code in w_codes])
-
-     def ball_holder_loss_lazy(self, new_G, num_of_sampled_latents, w_batch, use_wandb=False):
-         loss = 0.0
-
-         z_samples = np.random.randn(
-             num_of_sampled_latents, self.original_G.z_dim)
-         w_samples = self.original_G.mapping(torch.from_numpy(z_samples).to(global_config.device), None,
-                                             truncation_psi=0.5)
-         territory_indicator_ws = [self.get_morphed_w_code(
-             w_code.unsqueeze(0), w_batch) for w_code in w_samples]
-
-         for w_code in territory_indicator_ws:
-             new_img = new_G.synthesis(
-                 w_code, noise_mode='none', force_fp32=True)
-             with torch.no_grad():
-                 old_img = self.original_G.synthesis(
-                     w_code, noise_mode='none', force_fp32=True)
-
-             if hyperparameters.regulizer_l2_lambda > 0:
-                 l2_loss_val = l2_loss.l2_loss(old_img, new_img)
-                 if use_wandb:
-                     wandb.log({f'space_regulizer_l2_loss_val': l2_loss_val.detach().cpu()},
-                               step=global_config.training_step)
-                 loss += l2_loss_val * hyperparameters.regulizer_l2_lambda
-
-             if hyperparameters.regulizer_lpips_lambda > 0:
-                 loss_lpips = self.lpips_loss(old_img, new_img)
-                 loss_lpips = torch.mean(torch.squeeze(loss_lpips))
-                 if use_wandb:
-                     wandb.log({f'space_regulizer_lpips_loss_val': loss_lpips.detach().cpu()},
-                               step=global_config.training_step)
-                 loss += loss_lpips * hyperparameters.regulizer_lpips_lambda
-
-         return loss / len(territory_indicator_ws)
-
-     def space_regulizer_loss(self, new_G, w_batch, use_wandb):
-         ret_val = self.ball_holder_loss_lazy(
-             new_G, hyperparameters.latent_ball_num_of_samples, w_batch, use_wandb)
-         return ret_val
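
get_morphed_w_code does not interpolate between the two codes; it steps a fixed distance regulizer_alpha from fixed_w toward each sampled code. A standalone sketch of that geometry (alpha and the latent size here are arbitrary stand-ins for the hyperparameters):

import torch

alpha = 30.0                      # plays the role of hyperparameters.regulizer_alpha
fixed_w = torch.zeros(1, 512)
new_w = torch.randn(1, 512)

direction = new_w - fixed_w
morphed = fixed_w + alpha * direction / direction.norm(p=2)
print((morphed - fixed_w).norm(p=2).item())  # ~alpha: always the same distance from fixed_w
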
spaces/DragGan/DragGan/stylegan_human/training/networks_stylegan2.py DELETED
@@ -1,824 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- """Network architectures from the paper
- "Analyzing and Improving the Image Quality of StyleGAN".
- Matches the original implementation of configs E-F by Karras et al. at
- https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
-
- import numpy as np
- import torch
- import torch.nn.functional as F
- from torch_utils import misc
- from torch_utils import persistence
- from torch_utils.ops import conv2d_resample
- from torch_utils.ops import upfirdn2d
- from torch_utils.ops import bias_act
- from torch_utils.ops import fma
-
- #----------------------------------------------------------------------------
-
- @misc.profiled_function
- def normalize_2nd_moment(x, dim=1, eps=1e-8):
-     return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
-
- #----------------------------------------------------------------------------
-
- @misc.profiled_function
- def modulated_conv2d(
-     x,                        # Input tensor of shape [batch_size, in_channels, in_height, in_width].
-     weight,                   # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
-     styles,                   # Modulation coefficients of shape [batch_size, in_channels].
-     noise = None,             # Optional noise tensor to add to the output activations.
-     up = 1,                   # Integer upsampling factor.
-     down = 1,                 # Integer downsampling factor.
-     padding = 0,              # Padding with respect to the upsampled image.
-     resample_filter = None,   # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
-     demodulate = True,        # Apply weight demodulation?
-     flip_weight = True,       # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
-     fused_modconv = True,     # Perform modulation, convolution, and demodulation as a single fused operation?
- ):
-     batch_size = x.shape[0]
-     out_channels, in_channels, kh, kw = weight.shape
-     misc.assert_shape(weight, [out_channels, in_channels, kh, kw])  # [OIkk]
-     misc.assert_shape(x, [batch_size, in_channels, None, None])  # [NIHW]
-     misc.assert_shape(styles, [batch_size, in_channels])  # [NI]
-
-     # Pre-normalize inputs to avoid FP16 overflow.
-     if x.dtype == torch.float16 and demodulate:
-         weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True))  # max_Ikk
-         styles = styles / styles.norm(float('inf'), dim=1, keepdim=True)  # max_I
-
-     # Calculate per-sample weights and demodulation coefficients.
-     w = None
-     dcoefs = None
-     if demodulate or fused_modconv:
-         w = weight.unsqueeze(0)  # [NOIkk]
-         w = w * styles.reshape(batch_size, 1, -1, 1, 1)  # [NOIkk]
-     if demodulate:
-         dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt()  # [NO]
-     if demodulate and fused_modconv:
-         w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1)  # [NOIkk]
-
-     # Execute by scaling the activations before and after the convolution.
-     if not fused_modconv:
-         x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
-         x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
-         if demodulate and noise is not None:
-             x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
-         elif demodulate:
-             x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
-         elif noise is not None:
-             x = x.add_(noise.to(x.dtype))
-         return x
-
-     # Execute as one fused op using grouped convolution.
-     with misc.suppress_tracer_warnings():  # this value will be treated as a constant
-         batch_size = int(batch_size)
-     misc.assert_shape(x, [batch_size, in_channels, None, None])
-     x = x.reshape(1, -1, *x.shape[2:])
-     w = w.reshape(-1, in_channels, kh, kw)
-     x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
-     x = x.reshape(batch_size, -1, *x.shape[2:])
-     if noise is not None:
-         x = x.add_(noise)
-     return x
-
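
The fused and non-fused paths in modulated_conv2d above are meant to be numerically equivalent. A minimal standalone sketch of the core identity (small sizes, no demodulation or resampling; plain F.conv2d stands in for conv2d_resample):

import torch
import torch.nn.functional as F

N, I, O, k, s = 2, 3, 4, 3, 8
x = torch.randn(N, I, s, s)
w = torch.randn(O, I, k, k)
styles = torch.randn(N, I)

# non-fused: scale the activations, share one weight across the batch
y1 = F.conv2d(x * styles.reshape(N, I, 1, 1), w, padding=1)

# fused: bake the styles into a per-sample weight, run one grouped conv
w2 = (w.unsqueeze(0) * styles.reshape(N, 1, I, 1, 1)).reshape(N * O, I, k, k)
y2 = F.conv2d(x.reshape(1, N * I, s, s), w2, padding=1, groups=N).reshape(N, O, s, s)

print(torch.allclose(y1, y2, atol=1e-5))  # True
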
92
- #----------------------------------------------------------------------------
93
-
94
- @persistence.persistent_class
95
- class FullyConnectedLayer(torch.nn.Module):
96
- def __init__(self,
97
- in_features, # Number of input features.
98
- out_features, # Number of output features.
99
- bias = True, # Apply additive bias before the activation function?
100
- activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
101
- lr_multiplier = 1, # Learning rate multiplier.
102
- bias_init = 0, # Initial value for the additive bias.
103
- ):
104
- super().__init__()
105
- self.in_features = in_features
106
- self.out_features = out_features
107
- self.activation = activation
108
- self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
109
- self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None
110
- self.weight_gain = lr_multiplier / np.sqrt(in_features)
111
- self.bias_gain = lr_multiplier
112
-
113
- def forward(self, x):
114
- w = self.weight.to(x.dtype) * self.weight_gain
115
- b = self.bias
116
- if b is not None:
117
- b = b.to(x.dtype)
118
- if self.bias_gain != 1:
119
- b = b * self.bias_gain
120
-
121
- if self.activation == 'linear' and b is not None:
122
- x = torch.addmm(b.unsqueeze(0), x, w.t())
123
- else:
124
- x = x.matmul(w.t())
125
- x = bias_act.bias_act(x, b, act=self.activation)
126
- return x
127
-
128
- def extra_repr(self):
129
- return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
130
-
131
- #----------------------------------------------------------------------------
132
-
133
- @persistence.persistent_class
134
- class Conv2dLayer(torch.nn.Module):
135
- def __init__(self,
136
- in_channels, # Number of input channels.
137
- out_channels, # Number of output channels.
138
- kernel_size, # Width and height of the convolution kernel.
139
- bias = True, # Apply additive bias before the activation function?
140
- activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
141
- up = 1, # Integer upsampling factor.
142
- down = 1, # Integer downsampling factor.
143
- resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
144
- conv_clamp = None, # Clamp the output to +-X, None = disable clamping.
145
- channels_last = False, # Expect the input to have memory_format=channels_last?
146
- trainable = True, # Update the weights of this layer during training?
147
- ):
148
- super().__init__()
149
- self.in_channels = in_channels
150
- self.out_channels = out_channels
151
- self.activation = activation
152
- self.up = up
153
- self.down = down
154
- self.conv_clamp = conv_clamp
155
- self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
156
- self.padding = kernel_size // 2
157
- self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
158
- self.act_gain = bias_act.activation_funcs[activation].def_gain
159
-
160
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
161
- weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)
162
- bias = torch.zeros([out_channels]) if bias else None
163
- if trainable:
164
- self.weight = torch.nn.Parameter(weight)
165
- self.bias = torch.nn.Parameter(bias) if bias is not None else None
166
- else:
167
- self.register_buffer('weight', weight)
168
- if bias is not None:
169
- self.register_buffer('bias', bias)
170
- else:
171
- self.bias = None
172
-
173
- def forward(self, x, gain=1):
174
- w = self.weight * self.weight_gain
175
- b = self.bias.to(x.dtype) if self.bias is not None else None
176
- flip_weight = (self.up == 1) # slightly faster
177
- x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
178
-
179
- act_gain = self.act_gain * gain
180
- act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
181
- x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
182
- return x
183
-
184
- def extra_repr(self):
185
- return ' '.join([
186
- f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
187
- f'up={self.up}, down={self.down}'])
188
-
189
- #----------------------------------------------------------------------------
190
-
191
- @persistence.persistent_class
192
- class MappingNetwork(torch.nn.Module):
193
- def __init__(self,
194
- z_dim, # Input latent (Z) dimensionality, 0 = no latent.
195
- c_dim, # Conditioning label (C) dimensionality, 0 = no label.
196
- w_dim, # Intermediate latent (W) dimensionality.
197
- num_ws, # Number of intermediate latents to output, None = do not broadcast.
198
- num_layers = 8, # Number of mapping layers.
199
- embed_features = None, # Label embedding dimensionality, None = same as w_dim.
200
- layer_features = None, # Number of intermediate features in the mapping layers, None = same as w_dim.
201
- activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
202
- lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers.
203
- w_avg_beta = 0.998, # Decay for tracking the moving average of W during training, None = do not track.
204
- ):
205
- super().__init__()
206
- self.z_dim = z_dim
207
- self.c_dim = c_dim
208
- self.w_dim = w_dim
209
- self.num_ws = num_ws
210
- self.num_layers = num_layers
211
- self.w_avg_beta = w_avg_beta
212
-
213
- if embed_features is None:
214
- embed_features = w_dim
215
- if c_dim == 0:
216
- embed_features = 0
217
- if layer_features is None:
218
- layer_features = w_dim
219
- features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
220
-
221
- if c_dim > 0:
222
- self.embed = FullyConnectedLayer(c_dim, embed_features)
223
- for idx in range(num_layers):
224
- in_features = features_list[idx]
225
- out_features = features_list[idx + 1]
226
- layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
227
- setattr(self, f'fc{idx}', layer)
228
-
229
- if num_ws is not None and w_avg_beta is not None:
230
- self.register_buffer('w_avg', torch.zeros([w_dim]))
231
-
232
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
233
- # Embed, normalize, and concat inputs.
234
- x = None
235
- with torch.autograd.profiler.record_function('input'):
236
- if self.z_dim > 0:
237
- misc.assert_shape(z, [None, self.z_dim])
238
- x = normalize_2nd_moment(z.to(torch.float32))
239
- if self.c_dim > 0:
240
- misc.assert_shape(c, [None, self.c_dim])
241
- y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
242
- x = torch.cat([x, y], dim=1) if x is not None else y
243
-
244
- # Main layers.
245
- for idx in range(self.num_layers):
246
- layer = getattr(self, f'fc{idx}')
247
- x = layer(x)
248
-
249
- # Update moving average of W.
250
- if update_emas and self.w_avg_beta is not None:
251
- with torch.autograd.profiler.record_function('update_w_avg'):
252
- self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
253
-
254
- # Broadcast.
255
- if self.num_ws is not None:
256
- with torch.autograd.profiler.record_function('broadcast'):
257
- x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
258
-
259
- # Apply truncation.
260
- if truncation_psi != 1:
261
- with torch.autograd.profiler.record_function('truncate'):
262
- assert self.w_avg_beta is not None
263
- if self.num_ws is None or truncation_cutoff is None:
264
- x = self.w_avg.lerp(x, truncation_psi)
265
- else:
266
- x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
267
- return x
268
-
269
- def extra_repr(self):
270
- return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
271
-
272
- #----------------------------------------------------------------------------
273
-
274
- @persistence.persistent_class
275
- class SynthesisLayer(torch.nn.Module):
276
- def __init__(self,
277
- in_channels, # Number of input channels.
278
- out_channels, # Number of output channels.
279
- w_dim, # Intermediate latent (W) dimensionality.
280
- resolution, # Resolution of this layer.
281
- kernel_size = 3, # Convolution kernel size.
282
- up = 1, # Integer upsampling factor.
283
- use_noise = True, # Enable noise input?
284
- activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
285
- resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
286
- conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
287
- channels_last = False, # Use channels_last format for the weights?
288
- ):
289
- super().__init__()
290
- self.in_channels = in_channels
291
- self.out_channels = out_channels
292
- self.w_dim = w_dim
293
- self.resolution = resolution
294
- self.up = up
295
- self.use_noise = use_noise
296
- self.activation = activation
297
- self.conv_clamp = conv_clamp
298
- self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
299
- self.padding = kernel_size // 2
300
- self.act_gain = bias_act.activation_funcs[activation].def_gain
301
-
302
- self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
303
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
304
- self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
305
- if use_noise:
306
- self.register_buffer('noise_const', torch.randn([resolution, resolution]))
307
- self.noise_strength = torch.nn.Parameter(torch.zeros([]))
308
- self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
309
-
310
- def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
311
- assert noise_mode in ['random', 'const', 'none']
312
- in_resolution = self.resolution // self.up
313
- misc.assert_shape(x, [None, self.in_channels, in_resolution, in_resolution])
314
- styles = self.affine(w)
315
-
316
- noise = None
317
- if self.use_noise and noise_mode == 'random':
318
- noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
319
- if self.use_noise and noise_mode == 'const':
320
- noise = self.noise_const * self.noise_strength
321
-
322
- flip_weight = (self.up == 1) # slightly faster
323
- x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
324
- padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
325
-
326
- act_gain = self.act_gain * gain
327
- act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
328
- x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
329
- return x
330
-
331
- def extra_repr(self):
332
- return ' '.join([
333
- f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
334
- f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])
335
-
336
- #----------------------------------------------------------------------------
337
-
338
- @persistence.persistent_class
339
- class ToRGBLayer(torch.nn.Module):
340
- def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
341
- super().__init__()
342
- self.in_channels = in_channels
343
- self.out_channels = out_channels
344
- self.w_dim = w_dim
345
- self.conv_clamp = conv_clamp
346
- self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
347
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
348
- self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
349
- self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
350
- self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
351
-
352
- def forward(self, x, w, fused_modconv=True):
353
- styles = self.affine(w) * self.weight_gain
354
- x = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv)
355
- x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
356
- return x
357
-
358
- def extra_repr(self):
359
- return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'
360
-
361
- #----------------------------------------------------------------------------
362
-
363
- @persistence.persistent_class
364
- class SynthesisBlock(torch.nn.Module):
365
- def __init__(self,
366
- in_channels, # Number of input channels, 0 = first block.
367
- out_channels, # Number of output channels.
368
- w_dim, # Intermediate latent (W) dimensionality.
369
- resolution, # Resolution of this block.
370
- img_channels, # Number of output color channels.
371
- is_last, # Is this the last block?
372
- architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
373
- resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
374
- conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping.
375
- use_fp16 = False, # Use FP16 for this block?
376
- fp16_channels_last = False, # Use channels-last memory format with FP16?
377
- fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
378
- **layer_kwargs, # Arguments for SynthesisLayer.
379
- ):
380
- assert architecture in ['orig', 'skip', 'resnet']
381
- super().__init__()
382
- self.in_channels = in_channels
383
- self.w_dim = w_dim
384
- self.resolution = resolution
385
- self.img_channels = img_channels
386
- self.is_last = is_last
387
- self.architecture = architecture
388
- self.use_fp16 = use_fp16
389
- self.channels_last = (use_fp16 and fp16_channels_last)
390
- self.fused_modconv_default = fused_modconv_default
391
- self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
392
- self.num_conv = 0
393
- self.num_torgb = 0
394
-
395
- if in_channels == 0:
396
- self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))
397
-
398
- if in_channels != 0:
399
- self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
400
- resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
401
- self.num_conv += 1
402
-
403
- self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
404
- conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
405
- self.num_conv += 1
406
-
407
- if is_last or architecture == 'skip':
408
- self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
409
- conv_clamp=conv_clamp, channels_last=self.channels_last)
410
- self.num_torgb += 1
411
-
412
- if in_channels != 0 and architecture == 'resnet':
413
- self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
414
- resample_filter=resample_filter, channels_last=self.channels_last)
415
-
416
- def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
417
- _ = update_emas # unused
418
- misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
419
- w_iter = iter(ws.unbind(dim=1))
420
- if ws.device.type != 'cuda':
421
- force_fp32 = True
422
- dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
423
- memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
424
- if fused_modconv is None:
425
- fused_modconv = self.fused_modconv_default
426
- if fused_modconv == 'inference_only':
427
- fused_modconv = (not self.training)
428
-
429
- # Input.
430
- if self.in_channels == 0:
431
- x = self.const.to(dtype=dtype, memory_format=memory_format)
432
- x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
433
- else:
434
- misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
435
- x = x.to(dtype=dtype, memory_format=memory_format)
436
-
437
- # Main layers.
438
- if self.in_channels == 0:
439
- x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
440
- elif self.architecture == 'resnet':
441
- y = self.skip(x, gain=np.sqrt(0.5))
442
- x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
443
- x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
444
- x = y.add_(x)
445
- else:
446
- x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
447
- x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
448
-
449
- # ToRGB.
450
- if img is not None:
451
- misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
452
- img = upfirdn2d.upsample2d(img, self.resample_filter)
453
- if self.is_last or self.architecture == 'skip':
454
- y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
455
- y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
456
- img = img.add_(y) if img is not None else y
457
-
458
- assert x.dtype == dtype
459
- assert img is None or img.dtype == torch.float32
460
- return x, img
461
-
462
- def extra_repr(self):
463
- return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
464
-
465
- #----------------------------------------------------------------------------
466
-
467
- @persistence.persistent_class
- class SynthesisNetwork(torch.nn.Module):
-     def __init__(self,
-         w_dim,                      # Intermediate latent (W) dimensionality.
-         img_resolution,             # Output image resolution.
-         img_channels,               # Number of color channels.
-         channel_base    = 32768,    # Overall multiplier for the number of channels.
-         channel_max     = 512,      # Maximum number of channels in any layer.
-         num_fp16_res    = 4,        # Use FP16 for the N highest resolutions.
-         **block_kwargs,             # Arguments for SynthesisBlock.
-     ):
-         assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
-         super().__init__()
-         self.w_dim = w_dim
-         self.img_resolution = img_resolution
-         self.img_resolution_log2 = int(np.log2(img_resolution))
-         self.img_channels = img_channels
-         self.num_fp16_res = num_fp16_res
-         self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
-         channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
-         fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
-
-         self.num_ws = 0
-         for res in self.block_resolutions:
-             in_channels = channels_dict[res // 2] if res > 4 else 0
-             out_channels = channels_dict[res]
-             use_fp16 = (res >= fp16_resolution)
-             is_last = (res == self.img_resolution)
-             block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
-                 img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
-             self.num_ws += block.num_conv
-             if is_last:
-                 self.num_ws += block.num_torgb
-             setattr(self, f'b{res}', block)
-
-     def forward(self, ws, return_feature=False, **block_kwargs):
-         block_ws = []
-         features = []
-         with torch.autograd.profiler.record_function('split_ws'):
-             misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
-             ws = ws.to(torch.float32)
-             w_idx = 0
-             for res in self.block_resolutions:
-                 block = getattr(self, f'b{res}')
-                 block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
-                 w_idx += block.num_conv
-
-         x = img = None
-         for res, cur_ws in zip(self.block_resolutions, block_ws):
-             block = getattr(self, f'b{res}')
-             x, img = block(x, img, cur_ws, **block_kwargs)
-             features.append(x)
-         if return_feature:
-             return img, features
-         else:
-             return img
-
-     def extra_repr(self):
-         return ' '.join([
-             f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
-             f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
-             f'num_fp16_res={self.num_fp16_res:d}'])
-
- #----------------------------------------------------------------------------
-
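A note on the `ws` bookkeeping in `SynthesisNetwork` above: `num_ws` counts every block's `num_conv` plus only the last block's `num_torgb`, while `split_ws` hands each block a slice of length `num_conv + num_torgb` and advances the cursor by `num_conv` only, so in the `skip` architecture a block's ToRGB vector is the same vector that drives the next block's first convolution. A minimal sketch of that indexing, with hypothetical per-block counts:

```python
# Hypothetical per-block (num_conv, num_torgb) counts for a 16x16
# skip-architecture synthesis network: the 4x4 block has one conv,
# every later block has two, and each block has one ToRGB.
blocks = [(1, 1), (2, 1), (2, 1)]                      # b4, b8, b16
num_ws = sum(nc for nc, _ in blocks) + blocks[-1][1]   # 5 + 1 = 6

w_idx = 0
for nc, nt in blocks:
    # Each block is handed nc + nt vectors, but the cursor advances by nc,
    # so block i's ToRGB vector doubles as block i+1's first conv vector.
    print(f"block consumes ws[{w_idx}:{w_idx + nc + nt}]")
    w_idx += nc
assert w_idx + blocks[-1][1] == num_ws
```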
- @persistence.persistent_class
- class Generator(torch.nn.Module):
-     def __init__(self,
-         z_dim,                      # Input latent (Z) dimensionality.
-         c_dim,                      # Conditioning label (C) dimensionality.
-         w_dim,                      # Intermediate latent (W) dimensionality.
-         img_resolution,             # Output resolution.
-         img_channels,               # Number of output color channels.
-         mapping_kwargs   = {},      # Arguments for MappingNetwork.
-         synthesis_kwargs = {},      # Arguments for SynthesisNetwork.
-         resize           = None,
-         # **synthesis_kwargs,       # Arguments for SynthesisNetwork.
-     ):
-         super().__init__()
-         self.z_dim = z_dim
-         self.c_dim = c_dim
-         self.w_dim = w_dim
-         self.img_resolution = img_resolution
-         self.img_channels = img_channels
-         self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
-         self.num_ws = self.synthesis.num_ws
-         self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
-         self.resize = resize
-
-     def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, return_feature=False, **synthesis_kwargs):
-         if input_is_w:
-             ws = z
-             if ws.dim() == 2:
-                 ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1])
-         else:
-             ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
-         img = self.synthesis(ws, update_emas=update_emas, return_feature=return_feature, **synthesis_kwargs)
-         if self.resize is not None:
-             img = imresize(img, [self.resize, self.resize])
-         return img
-
-
- def imresize(image, size):
-     dim = image.dim()
-     if dim == 3:
-         image = image.unsqueeze(1)
-     b, _, h, w = image.shape
-     if size[0] > h:
-         image = F.interpolate(image, size, mode='bilinear')
-     elif size[0] < h:
-         image = F.interpolate(image, size, mode='area')
-     if dim == 3:
-         image = image.squeeze(1)
-     return image
-
- #----------------------------------------------------------------------------
-
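For orientation, a minimal sketch of how this `Generator` is typically driven; the constructor dimensions below are illustrative, not values from any particular checkpoint, and the call assumes the rest of this file (`MappingNetwork`, the torch_utils helpers) is importable:

```python
import torch

# Illustrative dimensions; real checkpoints fix these in their pickle.
G = Generator(z_dim=512, c_dim=0, w_dim=512, img_resolution=256, img_channels=3)

z = torch.randn(4, G.z_dim)        # latent codes
c = torch.zeros(4, G.c_dim)        # empty conditioning labels (c_dim=0)
img = G(z, c, truncation_psi=0.7)  # [4, 3, 256, 256], roughly in [-1, 1]

# The forward pass also accepts W-space inputs: a single w per sample is
# broadcast across all layers when input_is_w=True.
w = G.mapping(z, c)[:, 0]          # [4, w_dim]
img_w = G(w, c, input_is_w=True)
```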
- @persistence.persistent_class
- class DiscriminatorBlock(torch.nn.Module):
-     def __init__(self,
-         in_channels,                        # Number of input channels, 0 = first block.
-         tmp_channels,                       # Number of intermediate channels.
-         out_channels,                       # Number of output channels.
-         resolution,                         # Resolution of this block.
-         img_channels,                       # Number of input color channels.
-         first_layer_idx,                    # Index of the first layer.
-         architecture        = 'resnet',     # Architecture: 'orig', 'skip', 'resnet'.
-         activation          = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
-         resample_filter     = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
-         conv_clamp          = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
-         use_fp16            = False,        # Use FP16 for this block?
-         fp16_channels_last  = False,        # Use channels-last memory format with FP16?
-         freeze_layers       = 0,            # Freeze-D: Number of layers to freeze.
-     ):
-         assert in_channels in [0, tmp_channels]
-         assert architecture in ['orig', 'skip', 'resnet']
-         super().__init__()
-         self.in_channels = in_channels
-         self.resolution = resolution
-         self.img_channels = img_channels
-         self.first_layer_idx = first_layer_idx
-         self.architecture = architecture
-         self.use_fp16 = use_fp16
-         self.channels_last = (use_fp16 and fp16_channels_last)
-         self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
-
-         self.num_layers = 0
-         def trainable_gen():
-             while True:
-                 layer_idx = self.first_layer_idx + self.num_layers
-                 trainable = (layer_idx >= freeze_layers)
-                 self.num_layers += 1
-                 yield trainable
-         trainable_iter = trainable_gen()
-
-         if in_channels == 0 or architecture == 'skip':
-             self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
-                 trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
-
-         self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
-             trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
-
-         self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
-             trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
-
-         if architecture == 'resnet':
-             self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
-                 trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
-
-     def forward(self, x, img, force_fp32=False):
-         if (x if x is not None else img).device.type != 'cuda':
-             force_fp32 = True
-         dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
-         memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
-
-         # Input.
-         if x is not None:
-             misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
-             x = x.to(dtype=dtype, memory_format=memory_format)
-
-         # FromRGB.
-         if self.in_channels == 0 or self.architecture == 'skip':
-             misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
-             img = img.to(dtype=dtype, memory_format=memory_format)
-             y = self.fromrgb(img)
-             x = x + y if x is not None else y
-             img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None
-
-         # Main layers.
-         if self.architecture == 'resnet':
-             y = self.skip(x, gain=np.sqrt(0.5))
-             x = self.conv0(x)
-             x = self.conv1(x, gain=np.sqrt(0.5))
-             x = y.add_(x)
-         else:
-             x = self.conv0(x)
-             x = self.conv1(x)
-
-         assert x.dtype == dtype
-         return x, img
-
-     def extra_repr(self):
-         return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
-
- #----------------------------------------------------------------------------
-
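One subtlety in `DiscriminatorBlock` above: the `trainable_gen` closure implements Freeze-D, creating each layer with `trainable=False` while its global index (`first_layer_idx` plus the running count) is still below `freeze_layers`. A standalone sketch of the same counting, using a hypothetical helper:

```python
def trainable_flags(first_layer_idx, layers_in_block, freeze_layers):
    # Mirrors trainable_gen: a layer becomes trainable once its global
    # index reaches freeze_layers (hypothetical standalone helper).
    return [first_layer_idx + i >= freeze_layers for i in range(layers_in_block)]

# With 3 layers per block, freeze_layers=4 freezes all of the first block
# and the first layer of the second:
print(trainable_flags(0, 3, 4))  # [False, False, False]
print(trainable_flags(3, 3, 4))  # [False, True, True]
```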
- @persistence.persistent_class
- class MinibatchStdLayer(torch.nn.Module):
-     def __init__(self, group_size, num_channels=1):
-         super().__init__()
-         self.group_size = group_size
-         self.num_channels = num_channels
-
-     def forward(self, x):
-         N, C, H, W = x.shape
-         with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
-             G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N)) if self.group_size is not None else N
-         F = self.num_channels
-         c = C // F
-
-         y = x.reshape(G, -1, F, c, H, W)    # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
-         y = y - y.mean(dim=0)               # [GnFcHW] Subtract mean over group.
-         y = y.square().mean(dim=0)          # [nFcHW]  Calc variance over group.
-         y = (y + 1e-8).sqrt()               # [nFcHW]  Calc stddev over group.
-         y = y.mean(dim=[2,3,4])             # [nF]     Take average over channels and pixels.
-         y = y.reshape(-1, F, 1, 1)          # [nF11]   Add missing dimensions.
-         y = y.repeat(G, 1, H, W)            # [NFHW]   Replicate over group and pixels.
-         x = torch.cat([x, y], dim=1)        # [NCHW]   Append to input as new channels.
-         return x
-
-     def extra_repr(self):
-         return f'group_size={self.group_size}, num_channels={self.num_channels:d}'
-
- #----------------------------------------------------------------------------
-
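The layer above appends the per-group standard deviation, averaged over channels and pixels, as an extra feature map so the discriminator can sense batch diversity. A self-contained numeric sketch of the same computation, free of the torch_utils helpers:

```python
import torch

x = torch.randn(8, 16, 4, 4)                # N=8, C=16, H=W=4
G, F = 4, 1                                 # group size, extra channels
N, C, H, W = x.shape

y = x.reshape(G, N // G, F, C // F, H, W)   # split batch into groups of G
y = (y - y.mean(0)).square().mean(0)        # per-group variance
y = (y + 1e-8).sqrt().mean(dim=[2, 3, 4])   # stddev, averaged over c, H, W
y = y.reshape(-1, F, 1, 1).repeat(G, 1, H, W)
out = torch.cat([x, y], dim=1)
assert out.shape == (8, 17, 4, 4)           # one stddev channel appended
```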
- @persistence.persistent_class
- class DiscriminatorEpilogue(torch.nn.Module):
-     def __init__(self,
-         in_channels,                    # Number of input channels.
-         cmap_dim,                       # Dimensionality of mapped conditioning label, 0 = no label.
-         resolution,                     # Resolution of this block.
-         img_channels,                   # Number of input color channels.
-         architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
-         mbstd_group_size    = 4,        # Group size for the minibatch standard deviation layer, None = entire minibatch.
-         mbstd_num_channels  = 1,        # Number of features for the minibatch standard deviation layer, 0 = disable.
-         activation          = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
-         conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
-     ):
-         assert architecture in ['orig', 'skip', 'resnet']
-         super().__init__()
-         self.in_channels = in_channels
-         self.cmap_dim = cmap_dim
-         self.resolution = resolution
-         self.img_channels = img_channels
-         self.architecture = architecture
-
-         if architecture == 'skip':
-             self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation)
-         self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
-         self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp)
-         self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation)
-         self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim)
-
-     def forward(self, x, img, cmap, force_fp32=False):
-         misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW]
-         _ = force_fp32 # unused
-         dtype = torch.float32
-         memory_format = torch.contiguous_format
-
-         # FromRGB.
-         x = x.to(dtype=dtype, memory_format=memory_format)
-         if self.architecture == 'skip':
-             misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
-             img = img.to(dtype=dtype, memory_format=memory_format)
-             x = x + self.fromrgb(img)
-
-         # Main layers.
-         if self.mbstd is not None:
-             x = self.mbstd(x)
-         x = self.conv(x)
-         x = self.fc(x.flatten(1))
-         x = self.out(x)
-
-         # Conditioning.
-         if self.cmap_dim > 0:
-             misc.assert_shape(cmap, [None, self.cmap_dim])
-             x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
-
-         assert x.dtype == dtype
-         return x
-
-     def extra_repr(self):
-         return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
-
- #----------------------------------------------------------------------------
-
- @persistence.persistent_class
- class Discriminator(torch.nn.Module):
-     def __init__(self,
-         c_dim,                          # Conditioning label (C) dimensionality.
-         img_resolution,                 # Input resolution.
-         img_channels,                   # Number of input color channels.
-         architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
-         channel_base        = 32768,    # Overall multiplier for the number of channels.
-         channel_max         = 512,      # Maximum number of channels in any layer.
-         num_fp16_res        = 4,        # Use FP16 for the N highest resolutions.
-         conv_clamp          = 256,      # Clamp the output of convolution layers to +-X, None = disable clamping.
-         cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
-         block_kwargs        = {},       # Arguments for DiscriminatorBlock.
-         mapping_kwargs      = {},       # Arguments for MappingNetwork.
-         epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
-     ):
-         super().__init__()
-         self.c_dim = c_dim
-         self.img_resolution = img_resolution
-         self.img_resolution_log2 = int(np.log2(img_resolution))
-         self.img_channels = img_channels
-         self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
-         channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
-         fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
-
-         if cmap_dim is None:
-             cmap_dim = channels_dict[4]
-         if c_dim == 0:
-             cmap_dim = 0
-
-         common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
-         cur_layer_idx = 0
-         for res in self.block_resolutions:
-             in_channels = channels_dict[res] if res < img_resolution else 0
-             tmp_channels = channels_dict[res]
-             out_channels = channels_dict[res // 2]
-             use_fp16 = (res >= fp16_resolution)
-             block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
-                 first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
-             setattr(self, f'b{res}', block)
-             cur_layer_idx += block.num_layers
-         if c_dim > 0:
-             self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
-         self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
-
-     def forward(self, img, c, update_emas=False, **block_kwargs):
-         _ = update_emas # unused
-         x = None
-         for res in self.block_resolutions:
-             block = getattr(self, f'b{res}')
-             x, img = block(x, img, **block_kwargs)
-
-         cmap = None
-         if self.c_dim > 0:
-             cmap = self.mapping(None, c)
-         x = self.b4(x, img, cmap)
-         return x
-
-     def extra_repr(self):
-         return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
-
- #----------------------------------------------------------------------------
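Closing the loop, a matching sketch of the discriminator side with the same illustrative dimensions; `Discriminator` takes the image batch plus a (possibly empty) conditioning label:

```python
import torch

D = Discriminator(c_dim=0, img_resolution=256, img_channels=3)

img = torch.randn(4, 3, 256, 256)  # e.g. generator output
c = torch.zeros(4, D.c_dim)
logits = D(img, c)                 # [4, 1] raw realness scores
```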
spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_ade20k_full.py DELETED
@@ -1,964 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import os
-
- from detectron2.data import DatasetCatalog, MetadataCatalog
- from detectron2.data.datasets import load_sem_seg
-
- ADE20K_SEM_SEG_FULL_CATEGORIES = [
-     {"name": "wall", "id": 2978, "trainId": 0},
-     {"name": "building, edifice", "id": 312, "trainId": 1},
-     {"name": "sky", "id": 2420, "trainId": 2},
-     {"name": "tree", "id": 2855, "trainId": 3},
-     {"name": "road, route", "id": 2131, "trainId": 4},
-     {"name": "floor, flooring", "id": 976, "trainId": 5},
-     {"name": "ceiling", "id": 447, "trainId": 6},
-     {"name": "bed", "id": 165, "trainId": 7},
-     {"name": "sidewalk, pavement", "id": 2377, "trainId": 8},
-     {"name": "earth, ground", "id": 838, "trainId": 9},
-     {"name": "cabinet", "id": 350, "trainId": 10},
-     {"name": "person, individual, someone, somebody, mortal, soul", "id": 1831, "trainId": 11},
-     {"name": "grass", "id": 1125, "trainId": 12},
-     {"name": "windowpane, window", "id": 3055, "trainId": 13},
-     {"name": "car, auto, automobile, machine, motorcar", "id": 401, "trainId": 14},
-     {"name": "mountain, mount", "id": 1610, "trainId": 15},
-     {"name": "plant, flora, plant life", "id": 1910, "trainId": 16},
-     {"name": "table", "id": 2684, "trainId": 17},
-     {"name": "chair", "id": 471, "trainId": 18},
-     {"name": "curtain, drape, drapery, mantle, pall", "id": 687, "trainId": 19},
-     {"name": "door", "id": 774, "trainId": 20},
-     {"name": "sofa, couch, lounge", "id": 2473, "trainId": 21},
-     {"name": "sea", "id": 2264, "trainId": 22},
-     {"name": "painting, picture", "id": 1735, "trainId": 23},
-     {"name": "water", "id": 2994, "trainId": 24},
-     {"name": "mirror", "id": 1564, "trainId": 25},
-     {"name": "house", "id": 1276, "trainId": 26},
-     {"name": "rug, carpet, carpeting", "id": 2178, "trainId": 27},
-     {"name": "shelf", "id": 2329, "trainId": 28},
-     {"name": "armchair", "id": 57, "trainId": 29},
-     {"name": "fence, fencing", "id": 907, "trainId": 30},
-     {"name": "field", "id": 913, "trainId": 31},
-     {"name": "lamp", "id": 1395, "trainId": 32},
-     {"name": "rock, stone", "id": 2138, "trainId": 33},
-     {"name": "seat", "id": 2272, "trainId": 34},
-     {"name": "river", "id": 2128, "trainId": 35},
-     {"name": "desk", "id": 724, "trainId": 36},
-     {"name": "bathtub, bathing tub, bath, tub", "id": 155, "trainId": 37},
-     {"name": "railing, rail", "id": 2053, "trainId": 38},
-     {"name": "signboard, sign", "id": 2380, "trainId": 39},
-     {"name": "cushion", "id": 689, "trainId": 40},
-     {"name": "path", "id": 1788, "trainId": 41},
-     {"name": "work surface", "id": 3087, "trainId": 42},
-     {"name": "stairs, steps", "id": 2530, "trainId": 43},
-     {"name": "column, pillar", "id": 581, "trainId": 44},
-     {"name": "sink", "id": 2388, "trainId": 45},
-     {"name": "wardrobe, closet, press", "id": 2985, "trainId": 46},
-     {"name": "snow", "id": 2454, "trainId": 47},
-     {"name": "refrigerator, icebox", "id": 2096, "trainId": 48},
-     {"name": "base, pedestal, stand", "id": 137, "trainId": 49},
-     {"name": "bridge, span", "id": 294, "trainId": 50},
-     {"name": "blind, screen", "id": 212, "trainId": 51},
-     {"name": "runway", "id": 2185, "trainId": 52},
-     {"name": "cliff, drop, drop-off", "id": 524, "trainId": 53},
-     {"name": "sand", "id": 2212, "trainId": 54},
-     {"name": "fireplace, hearth, open fireplace", "id": 943, "trainId": 55},
-     {"name": "pillow", "id": 1869, "trainId": 56},
-     {"name": "screen door, screen", "id": 2251, "trainId": 57},
-     {"name": "toilet, can, commode, crapper, pot, potty, stool, throne", "id": 2793, "trainId": 58},
-     {"name": "skyscraper", "id": 2423, "trainId": 59},
-     {"name": "grandstand, covered stand", "id": 1121, "trainId": 60},
-     {"name": "box", "id": 266, "trainId": 61},
-     {"name": "pool table, billiard table, snooker table", "id": 1948, "trainId": 62},
-     {"name": "palm, palm tree", "id": 1744, "trainId": 63},
-     {"name": "double door", "id": 783, "trainId": 64},
-     {"name": "coffee table, cocktail table", "id": 571, "trainId": 65},
-     {"name": "counter", "id": 627, "trainId": 66},
-     {"name": "countertop", "id": 629, "trainId": 67},
-     {"name": "chest of drawers, chest, bureau, dresser", "id": 491, "trainId": 68},
-     {"name": "kitchen island", "id": 1374, "trainId": 69},
-     {"name": "boat", "id": 223, "trainId": 70},
-     {"name": "waterfall, falls", "id": 3016, "trainId": 71},
-     {
-         "name": "stove, kitchen stove, range, kitchen range, cooking stove",
-         "id": 2598,
-         "trainId": 72,
-     },
-     {"name": "flower", "id": 978, "trainId": 73},
-     {"name": "bookcase", "id": 239, "trainId": 74},
-     {"name": "controls", "id": 608, "trainId": 75},
-     {"name": "book", "id": 236, "trainId": 76},
-     {"name": "stairway, staircase", "id": 2531, "trainId": 77},
-     {"name": "streetlight, street lamp", "id": 2616, "trainId": 78},
-     {
-         "name": "computer, computing machine, computing device, data processor, electronic computer, information processing system",
-         "id": 591,
-         "trainId": 79,
-     },
-     {
-         "name": "bus, autobus, coach, charabanc, double-decker, jitney, motorbus, motorcoach, omnibus, passenger vehicle",
-         "id": 327,
-         "trainId": 80,
-     },
-     {"name": "swivel chair", "id": 2679, "trainId": 81},
-     {"name": "light, light source", "id": 1451, "trainId": 82},
-     {"name": "bench", "id": 181, "trainId": 83},
-     {"name": "case, display case, showcase, vitrine", "id": 420, "trainId": 84},
-     {"name": "towel", "id": 2821, "trainId": 85},
-     {"name": "fountain", "id": 1023, "trainId": 86},
-     {"name": "embankment", "id": 855, "trainId": 87},
-     {
-         "name": "television receiver, television, television set, tv, tv set, idiot box, boob tube, telly, goggle box",
-         "id": 2733,
-         "trainId": 88,
-     },
-     {"name": "van", "id": 2928, "trainId": 89},
-     {"name": "hill", "id": 1240, "trainId": 90},
-     {"name": "awning, sunshade, sunblind", "id": 77, "trainId": 91},
-     {"name": "poster, posting, placard, notice, bill, card", "id": 1969, "trainId": 92},
-     {"name": "truck, motortruck", "id": 2880, "trainId": 93},
-     {"name": "airplane, aeroplane, plane", "id": 14, "trainId": 94},
-     {"name": "pole", "id": 1936, "trainId": 95},
-     {"name": "tower", "id": 2828, "trainId": 96},
-     {"name": "court", "id": 631, "trainId": 97},
-     {"name": "ball", "id": 103, "trainId": 98},
-     {
-         "name": "aircraft carrier, carrier, flattop, attack aircraft carrier",
-         "id": 3144,
-         "trainId": 99,
-     },
-     {"name": "buffet, counter, sideboard", "id": 308, "trainId": 100},
-     {"name": "hovel, hut, hutch, shack, shanty", "id": 1282, "trainId": 101},
-     {"name": "apparel, wearing apparel, dress, clothes", "id": 38, "trainId": 102},
-     {"name": "minibike, motorbike", "id": 1563, "trainId": 103},
-     {"name": "animal, animate being, beast, brute, creature, fauna", "id": 29, "trainId": 104},
-     {"name": "chandelier, pendant, pendent", "id": 480, "trainId": 105},
-     {"name": "step, stair", "id": 2569, "trainId": 106},
-     {"name": "booth, cubicle, stall, kiosk", "id": 247, "trainId": 107},
-     {"name": "bicycle, bike, wheel, cycle", "id": 187, "trainId": 108},
-     {"name": "doorframe, doorcase", "id": 778, "trainId": 109},
-     {"name": "sconce", "id": 2243, "trainId": 110},
-     {"name": "pond", "id": 1941, "trainId": 111},
-     {"name": "trade name, brand name, brand, marque", "id": 2833, "trainId": 112},
-     {"name": "bannister, banister, balustrade, balusters, handrail", "id": 120, "trainId": 113},
-     {"name": "bag", "id": 95, "trainId": 114},
-     {"name": "traffic light, traffic signal, stoplight", "id": 2836, "trainId": 115},
-     {"name": "gazebo", "id": 1087, "trainId": 116},
-     {"name": "escalator, moving staircase, moving stairway", "id": 868, "trainId": 117},
-     {"name": "land, ground, soil", "id": 1401, "trainId": 118},
-     {"name": "board, plank", "id": 220, "trainId": 119},
-     {"name": "arcade machine", "id": 47, "trainId": 120},
-     {"name": "eiderdown, duvet, continental quilt", "id": 843, "trainId": 121},
-     {"name": "bar", "id": 123, "trainId": 122},
-     {"name": "stall, stand, sales booth", "id": 2537, "trainId": 123},
-     {"name": "playground", "id": 1927, "trainId": 124},
-     {"name": "ship", "id": 2337, "trainId": 125},
-     {"name": "ottoman, pouf, pouffe, puff, hassock", "id": 1702, "trainId": 126},
-     {
-         "name": "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin",
-         "id": 64,
-         "trainId": 127,
-     },
-     {"name": "bottle", "id": 249, "trainId": 128},
-     {"name": "cradle", "id": 642, "trainId": 129},
-     {"name": "pot, flowerpot", "id": 1981, "trainId": 130},
-     {
-         "name": "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
-         "id": 609,
-         "trainId": 131,
-     },
-     {"name": "train, railroad train", "id": 2840, "trainId": 132},
-     {"name": "stool", "id": 2586, "trainId": 133},
-     {"name": "lake", "id": 1393, "trainId": 134},
-     {"name": "tank, storage tank", "id": 2704, "trainId": 135},
-     {"name": "ice, water ice", "id": 1304, "trainId": 136},
-     {"name": "basket, handbasket", "id": 146, "trainId": 137},
-     {"name": "manhole", "id": 1494, "trainId": 138},
-     {"name": "tent, collapsible shelter", "id": 2739, "trainId": 139},
-     {"name": "canopy", "id": 389, "trainId": 140},
-     {"name": "microwave, microwave oven", "id": 1551, "trainId": 141},
-     {"name": "barrel, cask", "id": 131, "trainId": 142},
-     {"name": "dirt track", "id": 738, "trainId": 143},
-     {"name": "beam", "id": 161, "trainId": 144},
-     {"name": "dishwasher, dish washer, dishwashing machine", "id": 747, "trainId": 145},
-     {"name": "plate", "id": 1919, "trainId": 146},
-     {"name": "screen, crt screen", "id": 3109, "trainId": 147},
-     {"name": "ruins", "id": 2179, "trainId": 148},
-     {"name": "washer, automatic washer, washing machine", "id": 2989, "trainId": 149},
-     {"name": "blanket, cover", "id": 206, "trainId": 150},
-     {"name": "plaything, toy", "id": 1930, "trainId": 151},
-     {"name": "food, solid food", "id": 1002, "trainId": 152},
-     {"name": "screen, silver screen, projection screen", "id": 2254, "trainId": 153},
-     {"name": "oven", "id": 1708, "trainId": 154},
-     {"name": "stage", "id": 2526, "trainId": 155},
-     {"name": "beacon, lighthouse, beacon light, pharos", "id": 160, "trainId": 156},
-     {"name": "umbrella", "id": 2901, "trainId": 157},
-     {"name": "sculpture", "id": 2262, "trainId": 158},
-     {"name": "aqueduct", "id": 44, "trainId": 159},
-     {"name": "container", "id": 597, "trainId": 160},
-     {"name": "scaffolding, staging", "id": 2235, "trainId": 161},
-     {"name": "hood, exhaust hood", "id": 1260, "trainId": 162},
-     {"name": "curb, curbing, kerb", "id": 682, "trainId": 163},
-     {"name": "roller coaster", "id": 2151, "trainId": 164},
-     {"name": "horse, equus caballus", "id": 3107, "trainId": 165},
-     {"name": "catwalk", "id": 432, "trainId": 166},
-     {"name": "glass, drinking glass", "id": 1098, "trainId": 167},
-     {"name": "vase", "id": 2932, "trainId": 168},
-     {"name": "central reservation", "id": 461, "trainId": 169},
-     {"name": "carousel", "id": 410, "trainId": 170},
-     {"name": "radiator", "id": 2046, "trainId": 171},
-     {"name": "closet", "id": 533, "trainId": 172},
-     {"name": "machine", "id": 1481, "trainId": 173},
-     {"name": "pier, wharf, wharfage, dock", "id": 1858, "trainId": 174},
-     {"name": "fan", "id": 894, "trainId": 175},
-     {"name": "inflatable bounce game", "id": 1322, "trainId": 176},
-     {"name": "pitch", "id": 1891, "trainId": 177},
-     {"name": "paper", "id": 1756, "trainId": 178},
-     {"name": "arcade, colonnade", "id": 49, "trainId": 179},
-     {"name": "hot tub", "id": 1272, "trainId": 180},
-     {"name": "helicopter", "id": 1229, "trainId": 181},
-     {"name": "tray", "id": 2850, "trainId": 182},
-     {"name": "partition, divider", "id": 1784, "trainId": 183},
-     {"name": "vineyard", "id": 2962, "trainId": 184},
-     {"name": "bowl", "id": 259, "trainId": 185},
-     {"name": "bullring", "id": 319, "trainId": 186},
-     {"name": "flag", "id": 954, "trainId": 187},
-     {"name": "pot", "id": 1974, "trainId": 188},
-     {"name": "footbridge, overcrossing, pedestrian bridge", "id": 1013, "trainId": 189},
-     {"name": "shower", "id": 2356, "trainId": 190},
-     {"name": "bag, traveling bag, travelling bag, grip, suitcase", "id": 97, "trainId": 191},
-     {"name": "bulletin board, notice board", "id": 318, "trainId": 192},
-     {"name": "confessional booth", "id": 592, "trainId": 193},
-     {"name": "trunk, tree trunk, bole", "id": 2885, "trainId": 194},
-     {"name": "forest", "id": 1017, "trainId": 195},
-     {"name": "elevator door", "id": 851, "trainId": 196},
-     {"name": "laptop, laptop computer", "id": 1407, "trainId": 197},
-     {"name": "instrument panel", "id": 1332, "trainId": 198},
-     {"name": "bucket, pail", "id": 303, "trainId": 199},
-     {"name": "tapestry, tapis", "id": 2714, "trainId": 200},
-     {"name": "platform", "id": 1924, "trainId": 201},
-     {"name": "jacket", "id": 1346, "trainId": 202},
-     {"name": "gate", "id": 1081, "trainId": 203},
-     {"name": "monitor, monitoring device", "id": 1583, "trainId": 204},
-     {
-         "name": "telephone booth, phone booth, call box, telephone box, telephone kiosk",
-         "id": 2727,
-         "trainId": 205,
-     },
-     {"name": "spotlight, spot", "id": 2509, "trainId": 206},
-     {"name": "ring", "id": 2123, "trainId": 207},
-     {"name": "control panel", "id": 602, "trainId": 208},
-     {"name": "blackboard, chalkboard", "id": 202, "trainId": 209},
-     {"name": "air conditioner, air conditioning", "id": 10, "trainId": 210},
-     {"name": "chest", "id": 490, "trainId": 211},
-     {"name": "clock", "id": 530, "trainId": 212},
-     {"name": "sand dune", "id": 2213, "trainId": 213},
-     {"name": "pipe, pipage, piping", "id": 1884, "trainId": 214},
-     {"name": "vault", "id": 2934, "trainId": 215},
-     {"name": "table football", "id": 2687, "trainId": 216},
-     {"name": "cannon", "id": 387, "trainId": 217},
-     {"name": "swimming pool, swimming bath, natatorium", "id": 2668, "trainId": 218},
-     {"name": "fluorescent, fluorescent fixture", "id": 982, "trainId": 219},
-     {"name": "statue", "id": 2547, "trainId": 220},
-     {
-         "name": "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system",
-         "id": 1474,
-         "trainId": 221,
-     },
-     {"name": "exhibitor", "id": 877, "trainId": 222},
-     {"name": "ladder", "id": 1391, "trainId": 223},
-     {"name": "carport", "id": 414, "trainId": 224},
-     {"name": "dam", "id": 698, "trainId": 225},
-     {"name": "pulpit", "id": 2019, "trainId": 226},
-     {"name": "skylight, fanlight", "id": 2422, "trainId": 227},
-     {"name": "water tower", "id": 3010, "trainId": 228},
-     {"name": "grill, grille, grillwork", "id": 1139, "trainId": 229},
-     {"name": "display board", "id": 753, "trainId": 230},
-     {"name": "pane, pane of glass, window glass", "id": 1747, "trainId": 231},
-     {"name": "rubbish, trash, scrap", "id": 2175, "trainId": 232},
-     {"name": "ice rink", "id": 1301, "trainId": 233},
-     {"name": "fruit", "id": 1033, "trainId": 234},
-     {"name": "patio", "id": 1789, "trainId": 235},
-     {"name": "vending machine", "id": 2939, "trainId": 236},
-     {"name": "telephone, phone, telephone set", "id": 2730, "trainId": 237},
-     {"name": "net", "id": 1652, "trainId": 238},
-     {
-         "name": "backpack, back pack, knapsack, packsack, rucksack, haversack",
-         "id": 90,
-         "trainId": 239,
-     },
-     {"name": "jar", "id": 1349, "trainId": 240},
-     {"name": "track", "id": 2830, "trainId": 241},
-     {"name": "magazine", "id": 1485, "trainId": 242},
-     {"name": "shutter", "id": 2370, "trainId": 243},
-     {"name": "roof", "id": 2155, "trainId": 244},
-     {"name": "banner, streamer", "id": 118, "trainId": 245},
-     {"name": "landfill", "id": 1402, "trainId": 246},
-     {"name": "post", "id": 1957, "trainId": 247},
-     {"name": "altarpiece, reredos", "id": 3130, "trainId": 248},
-     {"name": "hat, chapeau, lid", "id": 1197, "trainId": 249},
-     {"name": "arch, archway", "id": 52, "trainId": 250},
-     {"name": "table game", "id": 2688, "trainId": 251},
-     {"name": "bag, handbag, pocketbook, purse", "id": 96, "trainId": 252},
-     {"name": "document, written document, papers", "id": 762, "trainId": 253},
-     {"name": "dome", "id": 772, "trainId": 254},
-     {"name": "pier", "id": 1857, "trainId": 255},
-     {"name": "shanties", "id": 2315, "trainId": 256},
-     {"name": "forecourt", "id": 1016, "trainId": 257},
-     {"name": "crane", "id": 643, "trainId": 258},
-     {"name": "dog, domestic dog, canis familiaris", "id": 3105, "trainId": 259},
-     {"name": "piano, pianoforte, forte-piano", "id": 1849, "trainId": 260},
-     {"name": "drawing", "id": 791, "trainId": 261},
-     {"name": "cabin", "id": 349, "trainId": 262},
-     {
-         "name": "ad, advertisement, advertizement, advertising, advertizing, advert",
-         "id": 6,
-         "trainId": 263,
-     },
-     {"name": "amphitheater, amphitheatre, coliseum", "id": 3114, "trainId": 264},
-     {"name": "monument", "id": 1587, "trainId": 265},
-     {"name": "henhouse", "id": 1233, "trainId": 266},
-     {"name": "cockpit", "id": 559, "trainId": 267},
-     {"name": "heater, warmer", "id": 1223, "trainId": 268},
-     {"name": "windmill, aerogenerator, wind generator", "id": 3049, "trainId": 269},
-     {"name": "pool", "id": 1943, "trainId": 270},
-     {"name": "elevator, lift", "id": 853, "trainId": 271},
-     {"name": "decoration, ornament, ornamentation", "id": 709, "trainId": 272},
-     {"name": "labyrinth", "id": 1390, "trainId": 273},
-     {"name": "text, textual matter", "id": 2748, "trainId": 274},
-     {"name": "printer", "id": 2007, "trainId": 275},
-     {"name": "mezzanine, first balcony", "id": 1546, "trainId": 276},
-     {"name": "mattress", "id": 1513, "trainId": 277},
-     {"name": "straw", "id": 2600, "trainId": 278},
-     {"name": "stalls", "id": 2538, "trainId": 279},
-     {"name": "patio, terrace", "id": 1790, "trainId": 280},
-     {"name": "billboard, hoarding", "id": 194, "trainId": 281},
-     {"name": "bus stop", "id": 326, "trainId": 282},
-     {"name": "trouser, pant", "id": 2877, "trainId": 283},
-     {"name": "console table, console", "id": 594, "trainId": 284},
-     {"name": "rack", "id": 2036, "trainId": 285},
-     {"name": "notebook", "id": 1662, "trainId": 286},
-     {"name": "shrine", "id": 2366, "trainId": 287},
-     {"name": "pantry", "id": 1754, "trainId": 288},
-     {"name": "cart", "id": 418, "trainId": 289},
-     {"name": "steam shovel", "id": 2553, "trainId": 290},
-     {"name": "porch", "id": 1951, "trainId": 291},
-     {"name": "postbox, mailbox, letter box", "id": 1963, "trainId": 292},
-     {"name": "figurine, statuette", "id": 918, "trainId": 293},
-     {"name": "recycling bin", "id": 2086, "trainId": 294},
-     {"name": "folding screen", "id": 997, "trainId": 295},
-     {"name": "telescope", "id": 2731, "trainId": 296},
-     {"name": "deck chair, beach chair", "id": 704, "trainId": 297},
-     {"name": "kennel", "id": 1365, "trainId": 298},
-     {"name": "coffee maker", "id": 569, "trainId": 299},
-     {"name": "altar, communion table, lord's table", "id": 3108, "trainId": 300},
-     {"name": "fish", "id": 948, "trainId": 301},
-     {"name": "easel", "id": 839, "trainId": 302},
-     {"name": "artificial golf green", "id": 63, "trainId": 303},
-     {"name": "iceberg", "id": 1305, "trainId": 304},
-     {"name": "candlestick, candle holder", "id": 378, "trainId": 305},
-     {"name": "shower stall, shower bath", "id": 2362, "trainId": 306},
-     {"name": "television stand", "id": 2734, "trainId": 307},
-     {
-         "name": "wall socket, wall plug, electric outlet, electrical outlet, outlet, electric receptacle",
-         "id": 2982,
-         "trainId": 308,
-     },
-     {"name": "skeleton", "id": 2398, "trainId": 309},
-     {"name": "grand piano, grand", "id": 1119, "trainId": 310},
-     {"name": "candy, confect", "id": 382, "trainId": 311},
-     {"name": "grille door", "id": 1141, "trainId": 312},
-     {"name": "pedestal, plinth, footstall", "id": 1805, "trainId": 313},
-     {"name": "jersey, t-shirt, tee shirt", "id": 3102, "trainId": 314},
-     {"name": "shoe", "id": 2341, "trainId": 315},
-     {"name": "gravestone, headstone, tombstone", "id": 1131, "trainId": 316},
-     {"name": "shanty", "id": 2316, "trainId": 317},
-     {"name": "structure", "id": 2626, "trainId": 318},
-     {"name": "rocking chair, rocker", "id": 3104, "trainId": 319},
-     {"name": "bird", "id": 198, "trainId": 320},
-     {"name": "place mat", "id": 1896, "trainId": 321},
-     {"name": "tomb", "id": 2800, "trainId": 322},
-     {"name": "big top", "id": 190, "trainId": 323},
-     {"name": "gas pump, gasoline pump, petrol pump, island dispenser", "id": 3131, "trainId": 324},
-     {"name": "lockers", "id": 1463, "trainId": 325},
-     {"name": "cage", "id": 357, "trainId": 326},
-     {"name": "finger", "id": 929, "trainId": 327},
-     {"name": "bleachers", "id": 209, "trainId": 328},
-     {"name": "ferris wheel", "id": 912, "trainId": 329},
-     {"name": "hairdresser chair", "id": 1164, "trainId": 330},
-     {"name": "mat", "id": 1509, "trainId": 331},
-     {"name": "stands", "id": 2539, "trainId": 332},
-     {"name": "aquarium, fish tank, marine museum", "id": 3116, "trainId": 333},
-     {"name": "streetcar, tram, tramcar, trolley, trolley car", "id": 2615, "trainId": 334},
-     {"name": "napkin, table napkin, serviette", "id": 1644, "trainId": 335},
-     {"name": "dummy", "id": 818, "trainId": 336},
-     {"name": "booklet, brochure, folder, leaflet, pamphlet", "id": 242, "trainId": 337},
-     {"name": "sand trap", "id": 2217, "trainId": 338},
-     {"name": "shop, store", "id": 2347, "trainId": 339},
-     {"name": "table cloth", "id": 2686, "trainId": 340},
-     {"name": "service station", "id": 2300, "trainId": 341},
-     {"name": "coffin", "id": 572, "trainId": 342},
-     {"name": "drawer", "id": 789, "trainId": 343},
-     {"name": "cages", "id": 358, "trainId": 344},
-     {"name": "slot machine, coin machine", "id": 2443, "trainId": 345},
-     {"name": "balcony", "id": 101, "trainId": 346},
-     {"name": "volleyball court", "id": 2969, "trainId": 347},
-     {"name": "table tennis", "id": 2692, "trainId": 348},
-     {"name": "control table", "id": 606, "trainId": 349},
-     {"name": "shirt", "id": 2339, "trainId": 350},
-     {"name": "merchandise, ware, product", "id": 1533, "trainId": 351},
-     {"name": "railway", "id": 2060, "trainId": 352},
-     {"name": "parterre", "id": 1782, "trainId": 353},
-     {"name": "chimney", "id": 495, "trainId": 354},
-     {"name": "can, tin, tin can", "id": 371, "trainId": 355},
-     {"name": "tanks", "id": 2707, "trainId": 356},
-     {"name": "fabric, cloth, material, textile", "id": 889, "trainId": 357},
-     {"name": "alga, algae", "id": 3156, "trainId": 358},
-     {"name": "system", "id": 2683, "trainId": 359},
-     {"name": "map", "id": 1499, "trainId": 360},
-     {"name": "greenhouse", "id": 1135, "trainId": 361},
-     {"name": "mug", "id": 1619, "trainId": 362},
-     {"name": "barbecue", "id": 125, "trainId": 363},
-     {"name": "trailer", "id": 2838, "trainId": 364},
-     {"name": "toilet tissue, toilet paper, bathroom tissue", "id": 2792, "trainId": 365},
-     {"name": "organ", "id": 1695, "trainId": 366},
-     {"name": "dishrag, dishcloth", "id": 746, "trainId": 367},
-     {"name": "island", "id": 1343, "trainId": 368},
-     {"name": "keyboard", "id": 1370, "trainId": 369},
-     {"name": "trench", "id": 2858, "trainId": 370},
-     {"name": "basket, basketball hoop, hoop", "id": 145, "trainId": 371},
-     {"name": "steering wheel, wheel", "id": 2565, "trainId": 372},
-     {"name": "pitcher, ewer", "id": 1892, "trainId": 373},
-     {"name": "goal", "id": 1103, "trainId": 374},
-     {"name": "bread, breadstuff, staff of life", "id": 286, "trainId": 375},
-     {"name": "beds", "id": 170, "trainId": 376},
-     {"name": "wood", "id": 3073, "trainId": 377},
-     {"name": "file cabinet", "id": 922, "trainId": 378},
-     {"name": "newspaper, paper", "id": 1655, "trainId": 379},
-     {"name": "motorboat", "id": 1602, "trainId": 380},
-     {"name": "rope", "id": 2160, "trainId": 381},
-     {"name": "guitar", "id": 1151, "trainId": 382},
-     {"name": "rubble", "id": 2176, "trainId": 383},
-     {"name": "scarf", "id": 2239, "trainId": 384},
-     {"name": "barrels", "id": 132, "trainId": 385},
-     {"name": "cap", "id": 394, "trainId": 386},
-     {"name": "leaves", "id": 1424, "trainId": 387},
-     {"name": "control tower", "id": 607, "trainId": 388},
-     {"name": "dashboard", "id": 700, "trainId": 389},
-     {"name": "bandstand", "id": 116, "trainId": 390},
-     {"name": "lectern", "id": 1425, "trainId": 391},
-     {"name": "switch, electric switch, electrical switch", "id": 2676, "trainId": 392},
-     {"name": "baseboard, mopboard, skirting board", "id": 141, "trainId": 393},
-     {"name": "shower room", "id": 2360, "trainId": 394},
-     {"name": "smoke", "id": 2449, "trainId": 395},
-     {"name": "faucet, spigot", "id": 897, "trainId": 396},
-     {"name": "bulldozer", "id": 317, "trainId": 397},
-     {"name": "saucepan", "id": 2228, "trainId": 398},
-     {"name": "shops", "id": 2351, "trainId": 399},
-     {"name": "meter", "id": 1543, "trainId": 400},
-     {"name": "crevasse", "id": 656, "trainId": 401},
-     {"name": "gear", "id": 1088, "trainId": 402},
-     {"name": "candelabrum, candelabra", "id": 373, "trainId": 403},
-     {"name": "sofa bed", "id": 2472, "trainId": 404},
-     {"name": "tunnel", "id": 2892, "trainId": 405},
-     {"name": "pallet", "id": 1740, "trainId": 406},
-     {"name": "wire, conducting wire", "id": 3067, "trainId": 407},
-     {"name": "kettle, boiler", "id": 1367, "trainId": 408},
-     {"name": "bidet", "id": 188, "trainId": 409},
-     {
-         "name": "baby buggy, baby carriage, carriage, perambulator, pram, stroller, go-cart, pushchair, pusher",
-         "id": 79,
-         "trainId": 410,
-     },
-     {"name": "music stand", "id": 1633, "trainId": 411},
-     {"name": "pipe, tube", "id": 1885, "trainId": 412},
-     {"name": "cup", "id": 677, "trainId": 413},
-     {"name": "parking meter", "id": 1779, "trainId": 414},
-     {"name": "ice hockey rink", "id": 1297, "trainId": 415},
-     {"name": "shelter", "id": 2334, "trainId": 416},
-     {"name": "weeds", "id": 3027, "trainId": 417},
-     {"name": "temple", "id": 2735, "trainId": 418},
-     {"name": "patty, cake", "id": 1791, "trainId": 419},
-     {"name": "ski slope", "id": 2405, "trainId": 420},
-     {"name": "panel", "id": 1748, "trainId": 421},
-     {"name": "wallet", "id": 2983, "trainId": 422},
-     {"name": "wheel", "id": 3035, "trainId": 423},
-     {"name": "towel rack, towel horse", "id": 2824, "trainId": 424},
-     {"name": "roundabout", "id": 2168, "trainId": 425},
-     {"name": "canister, cannister, tin", "id": 385, "trainId": 426},
-     {"name": "rod", "id": 2148, "trainId": 427},
-     {"name": "soap dispenser", "id": 2465, "trainId": 428},
-     {"name": "bell", "id": 175, "trainId": 429},
-     {"name": "canvas", "id": 390, "trainId": 430},
-     {"name": "box office, ticket office, ticket booth", "id": 268, "trainId": 431},
-     {"name": "teacup", "id": 2722, "trainId": 432},
-     {"name": "trellis", "id": 2857, "trainId": 433},
-     {"name": "workbench", "id": 3088, "trainId": 434},
-     {"name": "valley, vale", "id": 2926, "trainId": 435},
-     {"name": "toaster", "id": 2782, "trainId": 436},
-     {"name": "knife", "id": 1378, "trainId": 437},
-     {"name": "podium", "id": 1934, "trainId": 438},
-     {"name": "ramp", "id": 2072, "trainId": 439},
-     {"name": "tumble dryer", "id": 2889, "trainId": 440},
-     {"name": "fireplug, fire hydrant, plug", "id": 944, "trainId": 441},
-     {"name": "gym shoe, sneaker, tennis shoe", "id": 1158, "trainId": 442},
-     {"name": "lab bench", "id": 1383, "trainId": 443},
-     {"name": "equipment", "id": 867, "trainId": 444},
-     {"name": "rocky formation", "id": 2145, "trainId": 445},
-     {"name": "plastic", "id": 1915, "trainId": 446},
-     {"name": "calendar", "id": 361, "trainId": 447},
-     {"name": "caravan", "id": 402, "trainId": 448},
-     {"name": "check-in-desk", "id": 482, "trainId": 449},
-     {"name": "ticket counter", "id": 2761, "trainId": 450},
-     {"name": "brush", "id": 300, "trainId": 451},
-     {"name": "mill", "id": 1554, "trainId": 452},
-     {"name": "covered bridge", "id": 636, "trainId": 453},
-     {"name": "bowling alley", "id": 260, "trainId": 454},
-     {"name": "hanger", "id": 1186, "trainId": 455},
-     {"name": "excavator", "id": 871, "trainId": 456},
-     {"name": "trestle", "id": 2859, "trainId": 457},
-     {"name": "revolving door", "id": 2103, "trainId": 458},
-     {"name": "blast furnace", "id": 208, "trainId": 459},
-     {"name": "scale, weighing machine", "id": 2236, "trainId": 460},
-     {"name": "projector", "id": 2012, "trainId": 461},
-     {"name": "soap", "id": 2462, "trainId": 462},
-     {"name": "locker", "id": 1462, "trainId": 463},
-     {"name": "tractor", "id": 2832, "trainId": 464},
-     {"name": "stretcher", "id": 2617, "trainId": 465},
-     {"name": "frame", "id": 1024, "trainId": 466},
-     {"name": "grating", "id": 1129, "trainId": 467},
-     {"name": "alembic", "id": 18, "trainId": 468},
-     {"name": "candle, taper, wax light", "id": 376, "trainId": 469},
-     {"name": "barrier", "id": 134, "trainId": 470},
-     {"name": "cardboard", "id": 407, "trainId": 471},
-     {"name": "cave", "id": 434, "trainId": 472},
-     {"name": "puddle", "id": 2017, "trainId": 473},
-     {"name": "tarp", "id": 2717, "trainId": 474},
-     {"name": "price tag", "id": 2005, "trainId": 475},
-     {"name": "watchtower", "id": 2993, "trainId": 476},
-     {"name": "meters", "id": 1545, "trainId": 477},
-     {
-         "name": "light bulb, lightbulb, bulb, incandescent lamp, electric light, electric-light bulb",
-         "id": 1445,
-         "trainId": 478,
-     },
-     {"name": "tracks", "id": 2831, "trainId": 479},
-     {"name": "hair dryer", "id": 1161, "trainId": 480},
-     {"name": "skirt", "id": 2411, "trainId": 481},
-     {"name": "viaduct", "id": 2949, "trainId": 482},
-     {"name": "paper towel", "id": 1769, "trainId": 483},
-     {"name": "coat", "id": 552, "trainId": 484},
-     {"name": "sheet", "id": 2327, "trainId": 485},
-     {"name": "fire extinguisher, extinguisher, asphyxiator", "id": 939, "trainId": 486},
-     {"name": "water wheel", "id": 3013, "trainId": 487},
-     {"name": "pottery, clayware", "id": 1986, "trainId": 488},
-     {"name": "magazine rack", "id": 1486, "trainId": 489},
-     {"name": "teapot", "id": 2723, "trainId": 490},
-     {"name": "microphone, mike", "id": 1549, "trainId": 491},
-     {"name": "support", "id": 2649, "trainId": 492},
-     {"name": "forklift", "id": 1020, "trainId": 493},
-     {"name": "canyon", "id": 392, "trainId": 494},
-     {"name": "cash register, register", "id": 422, "trainId": 495},
-     {"name": "leaf, leafage, foliage", "id": 1419, "trainId": 496},
-     {"name": "remote control, remote", "id": 2099, "trainId": 497},
-     {"name": "soap dish", "id": 2464, "trainId": 498},
-     {"name": "windshield, windscreen", "id": 3058, "trainId": 499},
-     {"name": "cat", "id": 430, "trainId": 500},
-     {"name": "cue, cue stick, pool cue, pool stick", "id": 675, "trainId": 501},
-     {"name": "vent, venthole, vent-hole, blowhole", "id": 2941, "trainId": 502},
-     {"name": "videos", "id": 2955, "trainId": 503},
-     {"name": "shovel", "id": 2355, "trainId": 504},
-     {"name": "eaves", "id": 840, "trainId": 505},
-     {"name": "antenna, aerial, transmitting aerial", "id": 32, "trainId": 506},
-     {"name": "shipyard", "id": 2338, "trainId": 507},
-     {"name": "hen, biddy", "id": 1232, "trainId": 508},
-     {"name": "traffic cone", "id": 2834, "trainId": 509},
-     {"name": "washing machines", "id": 2991, "trainId": 510},
-     {"name": "truck crane", "id": 2879, "trainId": 511},
-     {"name": "cds", "id": 444, "trainId": 512},
-     {"name": "niche", "id": 1657, "trainId": 513},
-     {"name": "scoreboard", "id": 2246, "trainId": 514},
-     {"name": "briefcase", "id": 296, "trainId": 515},
-     {"name": "boot", "id": 245, "trainId": 516},
-     {"name": "sweater, jumper", "id": 2661, "trainId": 517},
-     {"name": "hay", "id": 1202, "trainId": 518},
-     {"name": "pack", "id": 1714, "trainId": 519},
-     {"name": "bottle rack", "id": 251, "trainId": 520},
-     {"name": "glacier", "id": 1095, "trainId": 521},
-     {"name": "pergola", "id": 1828, "trainId": 522},
-     {"name": "building materials", "id": 311, "trainId": 523},
-     {"name": "television camera", "id": 2732, "trainId": 524},
-     {"name": "first floor", "id": 947, "trainId": 525},
-     {"name": "rifle", "id": 2115, "trainId": 526},
-     {"name": "tennis table", "id": 2738, "trainId": 527},
-     {"name": "stadium", "id": 2525, "trainId": 528},
-     {"name": "safety belt", "id": 2194, "trainId": 529},
-     {"name": "cover", "id": 634, "trainId": 530},
-     {"name": "dish rack", "id": 740, "trainId": 531},
-     {"name": "synthesizer", "id": 2682, "trainId": 532},
-     {"name": "pumpkin", "id": 2020, "trainId": 533},
-     {"name": "gutter", "id": 1156, "trainId": 534},
-     {"name": "fruit stand", "id": 1036, "trainId": 535},
-     {"name": "ice floe, floe", "id": 1295, "trainId": 536},
-     {"name": "handle, grip, handgrip, hold", "id": 1181, "trainId": 537},
-     {"name": "wheelchair", "id": 3037, "trainId": 538},
-     {"name": "mousepad, mouse mat", "id": 1614, "trainId": 539},
-     {"name": "diploma", "id": 736, "trainId": 540},
-     {"name": "fairground ride", "id": 893, "trainId": 541},
-     {"name": "radio", "id": 2047, "trainId": 542},
-     {"name": "hotplate", "id": 1274, "trainId": 543},
-     {"name": "junk", "id": 1361, "trainId": 544},
-     {"name": "wheelbarrow", "id": 3036, "trainId": 545},
-     {"name": "stream", "id": 2606, "trainId": 546},
-     {"name": "toll plaza", "id": 2797, "trainId": 547},
-     {"name": "punching bag", "id": 2022, "trainId": 548},
-     {"name": "trough", "id": 2876, "trainId": 549},
-     {"name": "throne", "id": 2758, "trainId": 550},
-     {"name": "chair desk", "id": 472, "trainId": 551},
-     {"name": "weighbridge", "id": 3028, "trainId": 552},
-     {"name": "extractor fan", "id": 882, "trainId": 553},
-     {"name": "hanging clothes", "id": 1189, "trainId": 554},
-     {"name": "dish, dish aerial, dish antenna, saucer", "id": 743, "trainId": 555},
-     {"name": "alarm clock, alarm", "id": 3122, "trainId": 556},
-     {"name": "ski lift", "id": 2401, "trainId": 557},
-     {"name": "chain", "id": 468, "trainId": 558},
-     {"name": "garage", "id": 1061, "trainId": 559},
-     {"name": "mechanical shovel", "id": 1523, "trainId": 560},
-     {"name": "wine rack", "id": 3059, "trainId": 561},
-     {"name": "tramway", "id": 2843, "trainId": 562},
-     {"name": "treadmill", "id": 2853, "trainId": 563},
-     {"name": "menu", "id": 1529, "trainId": 564},
-     {"name": "block", "id": 214, "trainId": 565},
-     {"name": "well", "id": 3032, "trainId": 566},
-     {"name": "witness stand", "id": 3071, "trainId": 567},
-     {"name": "branch", "id": 277, "trainId": 568},
-     {"name": "duck", "id": 813, "trainId": 569},
-     {"name": "casserole", "id": 426, "trainId": 570},
-     {"name": "frying pan", "id": 1039, "trainId": 571},
-     {"name": "desk organizer", "id": 727, "trainId": 572},
-     {"name": "mast", "id": 1508, "trainId": 573},
-     {"name": "spectacles, specs, eyeglasses, glasses", "id": 2490, "trainId": 574},
-     {"name": "service elevator", "id": 2299, "trainId": 575},
-     {"name": "dollhouse", "id": 768, "trainId": 576},
-     {"name": "hammock", "id": 1172, "trainId": 577},
-     {"name": "clothes hanging", "id": 537, "trainId": 578},
-     {"name": "photocopier", "id": 1847, "trainId": 579},
-     {"name": "notepad", "id": 1664, "trainId": 580},
-     {"name": "golf cart", "id": 1110, "trainId": 581},
-     {"name": "footpath", "id": 1014, "trainId": 582},
-     {"name": "cross", "id": 662, "trainId": 583},
-     {"name": "baptismal font", "id": 121, "trainId": 584},
-     {"name": "boiler", "id": 227, "trainId": 585},
-     {"name": "skip", "id": 2410, "trainId": 586},
-     {"name": "rotisserie", "id": 2165, "trainId": 587},
-     {"name": "tables", "id": 2696, "trainId": 588},
-     {"name": "water mill", "id": 3005, "trainId": 589},
-     {"name": "helmet", "id": 1231, "trainId": 590},
-     {"name": "cover curtain", "id": 635, "trainId": 591},
-     {"name": "brick", "id": 292, "trainId": 592},
-     {"name": "table runner", "id": 2690, "trainId": 593},
-     {"name": "ashtray", "id": 65, "trainId": 594},
-     {"name": "street box", "id": 2607, "trainId": 595},
-     {"name": "stick", "id": 2574, "trainId": 596},
-     {"name": "hangers", "id": 1188, "trainId": 597},
-     {"name": "cells", "id": 456, "trainId": 598},
-     {"name": "urinal", "id": 2913, "trainId": 599},
-     {"name": "centerpiece", "id": 459, "trainId": 600},
-     {"name": "portable fridge", "id": 1955, "trainId": 601},
-     {"name": "dvds", "id": 827, "trainId": 602},
-     {"name": "golf club", "id": 1111, "trainId": 603},
-     {"name": "skirting board", "id": 2412, "trainId": 604},
-     {"name": "water cooler", "id": 2997, "trainId": 605},
-     {"name": "clipboard", "id": 528, "trainId": 606},
-     {"name": "camera, photographic camera", "id": 366, "trainId": 607},
-     {"name": "pigeonhole", "id": 1863, "trainId": 608},
-     {"name": "chips", "id": 500, "trainId": 609},
-     {"name": "food processor", "id": 1001, "trainId": 610},
-     {"name": "post box", "id": 1958, "trainId": 611},
-     {"name": "lid", "id": 1441, "trainId": 612},
-     {"name": "drum", "id": 809, "trainId": 613},
-     {"name": "blender", "id": 210, "trainId": 614},
-     {"name": "cave entrance", "id": 435, "trainId": 615},
-     {"name": "dental chair", "id": 718, "trainId": 616},
-     {"name": "obelisk", "id": 1674, "trainId": 617},
-     {"name": "canoe", "id": 388, "trainId": 618},
-     {"name": "mobile", "id": 1572, "trainId": 619},
-     {"name": "monitors", "id": 1584, "trainId": 620},
-     {"name": "pool ball", "id": 1944, "trainId": 621},
-     {"name": "cue rack", "id": 674, "trainId": 622},
-     {"name": "baggage carts", "id": 99, "trainId": 623},
-     {"name": "shore", "id": 2352, "trainId": 624},
-     {"name": "fork", "id": 1019, "trainId": 625},
-     {"name": "paper filer", "id": 1763, "trainId": 626},
-     {"name": "bicycle rack", "id": 185, "trainId": 627},
-     {"name": "coat rack", "id": 554, "trainId": 628},
-     {"name": "garland", "id": 1066, "trainId": 629},
-     {"name": "sports bag", "id": 2508, "trainId": 630},
-     {"name": "fish tank", "id": 951, "trainId": 631},
-     {"name": "towel dispenser", "id": 2822, "trainId": 632},
-     {"name": "carriage", "id": 415, "trainId": 633},
-     {"name": "brochure", "id": 297, "trainId": 634},
-     {"name": "plaque", "id": 1914, "trainId": 635},
-     {"name": "stringer", "id": 2619, "trainId": 636},
-     {"name": "iron", "id": 1338, "trainId": 637},
-     {"name": "spoon", "id": 2505, "trainId": 638},
-     {"name": "flag pole", "id": 955, "trainId": 639},
-     {"name": "toilet brush", "id": 2786, "trainId": 640},
-     {"name": "book stand", "id": 238, "trainId": 641},
-     {"name": "water faucet, water tap, tap, hydrant", "id": 3000, "trainId": 642},
-     {"name": "ticket office", "id": 2763, "trainId": 643},
-     {"name": "broom", "id": 299, "trainId": 644},
-     {"name": "dvd", "id": 822, "trainId": 645},
-     {"name": "ice bucket", "id": 1288, "trainId": 646},
-     {"name": "carapace, shell, cuticle, shield", "id": 3101, "trainId": 647},
-     {"name": "tureen", "id": 2894, "trainId": 648},
-     {"name": "folders", "id": 992, "trainId": 649},
-     {"name": "chess", "id": 489, "trainId": 650},
-     {"name": "root", "id": 2157, "trainId": 651},
-     {"name": "sewing machine", "id": 2309, "trainId": 652},
-     {"name": "model", "id": 1576, "trainId": 653},
-     {"name": "pen", "id": 1810, "trainId": 654},
-     {"name": "violin", "id": 2964, "trainId": 655},
-     {"name": "sweatshirt", "id": 2662, "trainId": 656},
-     {"name": "recycling materials", "id": 2087, "trainId": 657},
-     {"name": "mitten", "id": 1569, "trainId": 658},
-     {"name": "chopping board, cutting board", "id": 503, "trainId": 659},
-     {"name": "mask", "id": 1505, "trainId": 660},
-     {"name": "log", "id": 1468, "trainId": 661},
-     {"name": "mouse, computer mouse", "id": 1613, "trainId": 662},
-     {"name": "grill", "id": 1138, "trainId": 663},
-     {"name": "hole", "id": 1256, "trainId": 664},
-     {"name": "target", "id": 2715, "trainId": 665},
-     {"name": "trash bag", "id": 2846, "trainId": 666},
-     {"name": "chalk", "id": 477, "trainId": 667},
-     {"name": "sticks", "id": 2576, "trainId": 668},
-     {"name": "balloon", "id": 108, "trainId": 669},
-     {"name": "score", "id": 2245, "trainId": 670},
-     {"name": "hair spray", "id": 1162, "trainId": 671},
-     {"name": "roll", "id": 2149, "trainId": 672},
-     {"name": "runner", "id": 2183, "trainId": 673},
-     {"name": "engine", "id": 858, "trainId": 674},
-     {"name": "inflatable glove", "id": 1324, "trainId": 675},
-     {"name": "games", "id": 1055, "trainId": 676},
-     {"name": "pallets", "id": 1741, "trainId": 677},
-     {"name": "baskets", "id": 149, "trainId": 678},
-     {"name": "coop", "id": 615, "trainId": 679},
-     {"name": "dvd player", "id": 825, "trainId": 680},
-     {"name": "rocking horse", "id": 2143, "trainId": 681},
-     {"name": "buckets", "id": 304, "trainId": 682},
-     {"name": "bread rolls", "id": 283, "trainId": 683},
-     {"name": "shawl", "id": 2322, "trainId": 684},
-     {"name": "watering can", "id": 3017, "trainId": 685},
-     {"name": "spotlights", "id": 2510, "trainId": 686},
-     {"name": "post-it", "id": 1960, "trainId": 687},
-     {"name": "bowls", "id": 265, "trainId": 688},
-     {"name": "security camera", "id": 2282, "trainId": 689},
-     {"name": "runner cloth", "id": 2184, "trainId": 690},
-     {"name": "lock", "id": 1461, "trainId": 691},
-     {"name": "alarm, warning device, alarm system", "id": 3113, "trainId": 692},
-     {"name": "side", "id": 2372, "trainId": 693},
-     {"name": "roulette", "id": 2166, "trainId": 694},
759
- {"name": "bone", "id": 232, "trainId": 695},
760
- {"name": "cutlery", "id": 693, "trainId": 696},
761
- {"name": "pool balls", "id": 1945, "trainId": 697},
762
- {"name": "wheels", "id": 3039, "trainId": 698},
763
- {"name": "spice rack", "id": 2494, "trainId": 699},
764
- {"name": "plant pots", "id": 1908, "trainId": 700},
765
- {"name": "towel ring", "id": 2827, "trainId": 701},
766
- {"name": "bread box", "id": 280, "trainId": 702},
767
- {"name": "video", "id": 2950, "trainId": 703},
768
- {"name": "funfair", "id": 1044, "trainId": 704},
769
- {"name": "breads", "id": 288, "trainId": 705},
770
- {"name": "tripod", "id": 2863, "trainId": 706},
771
- {"name": "ironing board", "id": 1342, "trainId": 707},
772
- {"name": "skimmer", "id": 2409, "trainId": 708},
773
- {"name": "hollow", "id": 1258, "trainId": 709},
774
- {"name": "scratching post", "id": 2249, "trainId": 710},
775
- {"name": "tricycle", "id": 2862, "trainId": 711},
776
- {"name": "file box", "id": 920, "trainId": 712},
777
- {"name": "mountain pass", "id": 1607, "trainId": 713},
778
- {"name": "tombstones", "id": 2802, "trainId": 714},
779
- {"name": "cooker", "id": 610, "trainId": 715},
780
- {"name": "card game, cards", "id": 3129, "trainId": 716},
781
- {"name": "golf bag", "id": 1108, "trainId": 717},
782
- {"name": "towel paper", "id": 2823, "trainId": 718},
783
- {"name": "chaise lounge", "id": 476, "trainId": 719},
784
- {"name": "sun", "id": 2641, "trainId": 720},
785
- {"name": "toilet paper holder", "id": 2788, "trainId": 721},
786
- {"name": "rake", "id": 2070, "trainId": 722},
787
- {"name": "key", "id": 1368, "trainId": 723},
788
- {"name": "umbrella stand", "id": 2903, "trainId": 724},
789
- {"name": "dartboard", "id": 699, "trainId": 725},
790
- {"name": "transformer", "id": 2844, "trainId": 726},
791
- {"name": "fireplace utensils", "id": 942, "trainId": 727},
792
- {"name": "sweatshirts", "id": 2663, "trainId": 728},
793
- {
794
- "name": "cellular telephone, cellular phone, cellphone, cell, mobile phone",
795
- "id": 457,
796
- "trainId": 729,
797
- },
798
- {"name": "tallboy", "id": 2701, "trainId": 730},
799
- {"name": "stapler", "id": 2540, "trainId": 731},
800
- {"name": "sauna", "id": 2231, "trainId": 732},
801
- {"name": "test tube", "id": 2746, "trainId": 733},
802
- {"name": "palette", "id": 1738, "trainId": 734},
803
- {"name": "shopping carts", "id": 2350, "trainId": 735},
804
- {"name": "tools", "id": 2808, "trainId": 736},
805
- {"name": "push button, push, button", "id": 2025, "trainId": 737},
806
- {"name": "star", "id": 2541, "trainId": 738},
807
- {"name": "roof rack", "id": 2156, "trainId": 739},
808
- {"name": "barbed wire", "id": 126, "trainId": 740},
809
- {"name": "spray", "id": 2512, "trainId": 741},
810
- {"name": "ear", "id": 831, "trainId": 742},
811
- {"name": "sponge", "id": 2503, "trainId": 743},
812
- {"name": "racket", "id": 2039, "trainId": 744},
813
- {"name": "tins", "id": 2774, "trainId": 745},
814
- {"name": "eyeglasses", "id": 886, "trainId": 746},
815
- {"name": "file", "id": 919, "trainId": 747},
816
- {"name": "scarfs", "id": 2240, "trainId": 748},
817
- {"name": "sugar bowl", "id": 2636, "trainId": 749},
818
- {"name": "flip flop", "id": 963, "trainId": 750},
819
- {"name": "headstones", "id": 1218, "trainId": 751},
820
- {"name": "laptop bag", "id": 1406, "trainId": 752},
821
- {"name": "leash", "id": 1420, "trainId": 753},
822
- {"name": "climbing frame", "id": 526, "trainId": 754},
823
- {"name": "suit hanger", "id": 2639, "trainId": 755},
824
- {"name": "floor spotlight", "id": 975, "trainId": 756},
825
- {"name": "plate rack", "id": 1921, "trainId": 757},
826
- {"name": "sewer", "id": 2305, "trainId": 758},
827
- {"name": "hard drive", "id": 1193, "trainId": 759},
828
- {"name": "sprinkler", "id": 2517, "trainId": 760},
829
- {"name": "tools box", "id": 2809, "trainId": 761},
830
- {"name": "necklace", "id": 1647, "trainId": 762},
831
- {"name": "bulbs", "id": 314, "trainId": 763},
832
- {"name": "steel industry", "id": 2560, "trainId": 764},
833
- {"name": "club", "id": 545, "trainId": 765},
834
- {"name": "jack", "id": 1345, "trainId": 766},
835
- {"name": "door bars", "id": 775, "trainId": 767},
836
- {
837
- "name": "control panel, instrument panel, control board, board, panel",
838
- "id": 603,
839
- "trainId": 768,
840
- },
841
- {"name": "hairbrush", "id": 1163, "trainId": 769},
842
- {"name": "napkin holder", "id": 1641, "trainId": 770},
843
- {"name": "office", "id": 1678, "trainId": 771},
844
- {"name": "smoke detector", "id": 2450, "trainId": 772},
845
- {"name": "utensils", "id": 2915, "trainId": 773},
846
- {"name": "apron", "id": 42, "trainId": 774},
847
- {"name": "scissors", "id": 2242, "trainId": 775},
848
- {"name": "terminal", "id": 2741, "trainId": 776},
849
- {"name": "grinder", "id": 1143, "trainId": 777},
850
- {"name": "entry phone", "id": 862, "trainId": 778},
851
- {"name": "newspaper stand", "id": 1654, "trainId": 779},
852
- {"name": "pepper shaker", "id": 1826, "trainId": 780},
853
- {"name": "onions", "id": 1689, "trainId": 781},
854
- {
855
- "name": "central processing unit, cpu, c p u , central processor, processor, mainframe",
856
- "id": 3124,
857
- "trainId": 782,
858
- },
859
- {"name": "tape", "id": 2710, "trainId": 783},
860
- {"name": "bat", "id": 152, "trainId": 784},
861
- {"name": "coaster", "id": 549, "trainId": 785},
862
- {"name": "calculator", "id": 360, "trainId": 786},
863
- {"name": "potatoes", "id": 1982, "trainId": 787},
864
- {"name": "luggage rack", "id": 1478, "trainId": 788},
865
- {"name": "salt", "id": 2203, "trainId": 789},
866
- {"name": "street number", "id": 2612, "trainId": 790},
867
- {"name": "viewpoint", "id": 2956, "trainId": 791},
868
- {"name": "sword", "id": 2681, "trainId": 792},
869
- {"name": "cd", "id": 437, "trainId": 793},
870
- {"name": "rowing machine", "id": 2171, "trainId": 794},
871
- {"name": "plug", "id": 1933, "trainId": 795},
872
- {"name": "andiron, firedog, dog, dog-iron", "id": 3110, "trainId": 796},
873
- {"name": "pepper", "id": 1824, "trainId": 797},
874
- {"name": "tongs", "id": 2803, "trainId": 798},
875
- {"name": "bonfire", "id": 234, "trainId": 799},
876
- {"name": "dog dish", "id": 764, "trainId": 800},
877
- {"name": "belt", "id": 177, "trainId": 801},
878
- {"name": "dumbbells", "id": 817, "trainId": 802},
879
- {"name": "videocassette recorder, vcr", "id": 3145, "trainId": 803},
880
- {"name": "hook", "id": 1262, "trainId": 804},
881
- {"name": "envelopes", "id": 864, "trainId": 805},
882
- {"name": "shower faucet", "id": 2359, "trainId": 806},
883
- {"name": "watch", "id": 2992, "trainId": 807},
884
- {"name": "padlock", "id": 1725, "trainId": 808},
885
- {"name": "swimming pool ladder", "id": 2667, "trainId": 809},
886
- {"name": "spanners", "id": 2484, "trainId": 810},
887
- {"name": "gravy boat", "id": 1133, "trainId": 811},
888
- {"name": "notice board", "id": 1667, "trainId": 812},
889
- {"name": "trash bags", "id": 2847, "trainId": 813},
890
- {"name": "fire alarm", "id": 932, "trainId": 814},
891
- {"name": "ladle", "id": 1392, "trainId": 815},
892
- {"name": "stethoscope", "id": 2573, "trainId": 816},
893
- {"name": "rocket", "id": 2140, "trainId": 817},
894
- {"name": "funnel", "id": 1046, "trainId": 818},
895
- {"name": "bowling pins", "id": 264, "trainId": 819},
896
- {"name": "valve", "id": 2927, "trainId": 820},
897
- {"name": "thermometer", "id": 2752, "trainId": 821},
898
- {"name": "cups", "id": 679, "trainId": 822},
899
- {"name": "spice jar", "id": 2493, "trainId": 823},
900
- {"name": "night light", "id": 1658, "trainId": 824},
901
- {"name": "soaps", "id": 2466, "trainId": 825},
902
- {"name": "games table", "id": 1057, "trainId": 826},
903
- {"name": "slotted spoon", "id": 2444, "trainId": 827},
904
- {"name": "reel", "id": 2093, "trainId": 828},
905
- {"name": "scourer", "id": 2248, "trainId": 829},
906
- {"name": "sleeping robe", "id": 2432, "trainId": 830},
907
- {"name": "desk mat", "id": 726, "trainId": 831},
908
- {"name": "dumbbell", "id": 816, "trainId": 832},
909
- {"name": "hammer", "id": 1171, "trainId": 833},
910
- {"name": "tie", "id": 2766, "trainId": 834},
911
- {"name": "typewriter", "id": 2900, "trainId": 835},
912
- {"name": "shaker", "id": 2313, "trainId": 836},
913
- {"name": "cheese dish", "id": 488, "trainId": 837},
914
- {"name": "sea star", "id": 2265, "trainId": 838},
915
- {"name": "racquet", "id": 2043, "trainId": 839},
916
- {"name": "butane gas cylinder", "id": 332, "trainId": 840},
917
- {"name": "paper weight", "id": 1771, "trainId": 841},
918
- {"name": "shaving brush", "id": 2320, "trainId": 842},
919
- {"name": "sunglasses", "id": 2646, "trainId": 843},
920
- {"name": "gear shift", "id": 1089, "trainId": 844},
921
- {"name": "towel rail", "id": 2826, "trainId": 845},
922
- {"name": "adding machine, totalizer, totaliser", "id": 3148, "trainId": 846},
923
- ]
924
-
925
-
926
- def _get_ade20k_full_meta():
927
- # Id 0 is reserved for ignore_label, we change ignore_label for 0
928
- # to 255 in our pre-processing, so all ids are shifted by 1.
929
- stuff_ids = [k["id"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES]
930
- assert len(stuff_ids) == 847, len(stuff_ids)
931
-
932
- # For semantic segmentation, this mapping maps from contiguous stuff id
933
- # (in [0, 91], used in models) to ids in the dataset (used for processing results)
934
- stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
935
- stuff_classes = [k["name"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES]
936
-
937
- ret = {
938
- "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
939
- "stuff_classes": stuff_classes,
940
- }
941
- return ret
942
-
943
-
944
- def register_all_ade20k_full(root):
945
- root = os.path.join(root, "ADE20K_2021_17_01")
946
- meta = _get_ade20k_full_meta()
947
- for name, dirname in [("train", "training"), ("val", "validation")]:
948
- image_dir = os.path.join(root, "images_detectron2", dirname)
949
- gt_dir = os.path.join(root, "annotations_detectron2", dirname)
950
- name = f"ade20k_full_sem_seg_{name}"
951
- DatasetCatalog.register(
952
- name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="tif", image_ext="jpg")
953
- )
954
- MetadataCatalog.get(name).set(
955
- stuff_classes=meta["stuff_classes"][:],
956
- image_root=image_dir,
957
- sem_seg_root=gt_dir,
958
- evaluator_type="sem_seg",
959
- ignore_label=65535, # NOTE: gt is saved in 16-bit TIFF images
960
- )
961
-
962
-
963
- _root = os.getenv("DETECTRON2_DATASETS", "datasets")
964
- register_all_ade20k_full(_root)
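For reference, a minimal sketch (not part of the deleted file above) of how the registered splits could be consumed; it assumes detectron2 is installed and that $DETECTRON2_DATASETS contains ADE20K_2021_17_01 prepared as in the script:

    from detectron2.data import DatasetCatalog, MetadataCatalog

    # Metadata attached by register_all_ade20k_full(); 847 category names.
    meta = MetadataCatalog.get("ade20k_full_sem_seg_val")
    assert len(meta.stuff_classes) == 847

    # Invokes the lazy load_sem_seg loader registered above.
    dataset_dicts = DatasetCatalog.get("ade20k_full_sem_seg_val")
    print(len(dataset_dicts), "validation samples")
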
spaces/EPFL-VILAB/MultiMAE/utils/random_erasing.py DELETED
@@ -1,103 +0,0 @@
- # --------------------------------------------------------
- # Based on timm and MAE-priv code bases
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm
- # https://github.com/BUPT-PRIV/MAE-priv
- # --------------------------------------------------------
- """ Random Erasing (Cutout)
-
- Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
- Copyright Zhun Zhong & Liang Zheng
-
- Hacked together by / Copyright 2020 Ross Wightman
- """
- import math
- import random
-
- import torch
-
-
- def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'):
-     # NOTE I've seen CUDA illegal memory access errors being caused by the normal_()
-     # paths, flip the order so normal is run on CPU if this becomes a problem
-     # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508
-     if per_pixel:
-         return torch.empty(patch_size, dtype=dtype, device=device).normal_()
-     elif rand_color:
-         return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_()
-     else:
-         return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device)
-
-
- class RandomErasing:
-     """ Randomly selects a rectangle region in an image and erases its pixels.
-     'Random Erasing Data Augmentation' by Zhong et al.
-     See https://arxiv.org/pdf/1708.04896.pdf
-
-     This variant of RandomErasing is intended to be applied to either a batch
-     or single image tensor after it has been normalized by dataset mean and std.
-     Args:
-         probability: Probability that the Random Erasing operation will be performed.
-         min_area: Minimum percentage of erased area wrt input image area.
-         max_area: Maximum percentage of erased area wrt input image area.
-         min_aspect: Minimum aspect ratio of erased area.
-         mode: pixel color mode, one of 'const', 'rand', or 'pixel'
-             'const' - erase block is constant color of 0 for all channels
-             'rand' - erase block is same per-channel random (normal) color
-             'pixel' - erase block is per-pixel random (normal) color
-         max_count: maximum number of erasing blocks per image, area per box is scaled by count.
-             per-image count is randomly chosen between 1 and this value.
-     """
-
-     def __init__(
-             self,
-             probability=0.5, min_area=0.02, max_area=1 / 3, min_aspect=0.3, max_aspect=None,
-             mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'):
-         self.probability = probability
-         self.min_area = min_area
-         self.max_area = max_area
-         max_aspect = max_aspect or 1 / min_aspect
-         self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
-         self.min_count = min_count
-         self.max_count = max_count or min_count
-         self.num_splits = num_splits
-         mode = mode.lower()
-         self.rand_color = False
-         self.per_pixel = False
-         if mode == 'rand':
-             self.rand_color = True  # per block random normal
-         elif mode == 'pixel':
-             self.per_pixel = True  # per pixel random normal
-         else:
-             assert not mode or mode == 'const'
-         self.device = device
-
-     def _erase(self, img, chan, img_h, img_w, dtype):
-         if random.random() > self.probability:
-             return
-         area = img_h * img_w
-         count = self.min_count if self.min_count == self.max_count else \
-             random.randint(self.min_count, self.max_count)
-         for _ in range(count):
-             for attempt in range(10):
-                 target_area = random.uniform(self.min_area, self.max_area) * area / count
-                 aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
-                 h = int(round(math.sqrt(target_area * aspect_ratio)))
-                 w = int(round(math.sqrt(target_area / aspect_ratio)))
-                 if w < img_w and h < img_h:
-                     top = random.randint(0, img_h - h)
-                     left = random.randint(0, img_w - w)
-                     img[:, top:top + h, left:left + w] = _get_pixels(
-                         self.per_pixel, self.rand_color, (chan, h, w),
-                         dtype=dtype, device=self.device)
-                     break
-
-     def __call__(self, input):
-         if len(input.size()) == 3:
-             self._erase(input, *input.size(), input.dtype)
-         else:
-             batch_size, chan, img_h, img_w = input.size()
-             # skip first slice of batch if num_splits is set (for clean portion of samples)
-             batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0
-             for i in range(batch_start, batch_size):
-                 self._erase(input[i], chan, img_h, img_w, input.dtype)
-         return input
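A minimal usage sketch for the class above (not from the original file); device='cpu' is chosen so it runs without CUDA:

    import torch

    # probability=1.0 guarantees erasing; mode='pixel' fills each erased pixel
    # with independent normal noise via _get_pixels().
    eraser = RandomErasing(probability=1.0, mode='pixel', max_count=2, device='cpu')
    batch = torch.randn(4, 3, 224, 224)  # stands in for normalized images
    batch = eraser(batch)  # erased rectangles are overwritten in place
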
spaces/Eddycrack864/Applio-Inference/tools/infer_cli.py DELETED
@@ -1,67 +0,0 @@
- import argparse
- import os
- import sys
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- from dotenv import load_dotenv
- from scipy.io import wavfile
-
- from configs.config import Config
- from infer.modules.vc.modules import VC
-
- ####
- # USAGE
- #
- # Run from your terminal, e.g.: python tools/infer_cli.py --model_name MODEL.pth --input_path IN.wav --index_path INDEX --opt_path OUT.wav
-
-
- def arg_parse() -> argparse.Namespace:
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--f0up_key", type=int, default=0)
-     parser.add_argument("--input_path", type=str, help="input path")
-     parser.add_argument("--index_path", type=str, help="index path")
-     parser.add_argument("--f0method", type=str, default="harvest", help="harvest or pm")
-     parser.add_argument("--opt_path", type=str, help="opt path")
-     parser.add_argument("--model_name", type=str, help="stored in assets/weight_root")
-     parser.add_argument("--index_rate", type=float, default=0.66, help="index rate")
-     parser.add_argument("--device", type=str, help="device")
-     parser.add_argument("--is_half", type=bool, help="use half -> True")  # NOTE: argparse's type=bool is truthy for any non-empty string
-     parser.add_argument("--filter_radius", type=int, default=3, help="filter radius")
-     parser.add_argument("--resample_sr", type=int, default=0, help="resample sr")
-     parser.add_argument("--rms_mix_rate", type=float, default=1, help="rms mix rate")
-     parser.add_argument("--protect", type=float, default=0.33, help="protect")
-
-     args = parser.parse_args()
-     sys.argv = sys.argv[:1]  # strip CLI args so downstream modules do not re-parse them
-
-     return args
-
-
- def main():
-     load_dotenv()
-     args = arg_parse()
-     config = Config()
-     config.device = args.device if args.device else config.device
-     config.is_half = args.is_half if args.is_half else config.is_half
-     vc = VC(config)
-     vc.get_vc(args.model_name)
-     _, wav_opt = vc.vc_single(
-         0,
-         args.input_path,
-         args.f0up_key,
-         None,
-         args.f0method,
-         args.index_path,
-         None,
-         args.index_rate,
-         args.filter_radius,
-         args.resample_sr,
-         args.rms_mix_rate,
-         args.protect,
-     )
-     wavfile.write(args.opt_path, wav_opt[0], wav_opt[1])
-
-
- if __name__ == "__main__":
-     main()
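A hedged sketch of driving this CLI from another Python script; the model, index, and audio paths are hypothetical, and the repository root is assumed as the working directory:

    import subprocess

    subprocess.run(
        [
            "python", "tools/infer_cli.py",
            "--model_name", "my_voice.pth",         # hypothetical weight under assets/weight_root
            "--input_path", "samples/input.wav",    # hypothetical source audio
            "--index_path", "logs/my_voice.index",  # hypothetical feature index
            "--opt_path", "output/converted.wav",   # destination for the converted audio
            "--f0method", "harvest",
        ],
        check=True,
    )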